prompt | response
---|---
Generate the Verilog code corresponding to the following Chisel files.
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
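As a rough illustration of how these domains are typically used (a hypothetical sketch, assuming the rocket-chip prci and diplomacy packages imported above; PeripheralDevice is an assumed placeholder LazyModule, not part of these files): a parent LazyModule creates a ClockSinkDomain, drives its clockNode from a ClockSourceNode, and elaborates children inside the domain so they pick up the domain's clock and reset implicitly, the same pattern NoC.scala below uses for its per-router sink domains.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.prci._
class DeviceIsland(implicit p: Parameters) extends LazyModule {
  // Diplomatic clock source, driven below from this module's own implicit clock/reset.
  val clockSource = ClockSourceNode(Seq(ClockSourceParameters()))
  val domain = LazyModule(new ClockSinkDomain(name = Some("device")))
  domain.clockNode := clockSource
  // Children elaborated inside `domain { ... }` get the domain's clock/reset as their implicit
  // clock and reset, because Domain.Impl drives childClock/childReset from clockBundle.
  val device = domain { LazyModule(new PeripheralDevice) } // PeripheralDevice: assumed placeholder
  lazy val module = new LazyModuleImp(this) {
    clockSource.out.head._1.clock := clock
    clockSource.out.head._1.reset := reset
  }
}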
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and its
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
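A minimal sketch of the LazyRawModuleImp pattern described above (RawClockIsland is made up for illustration and assumes only the imports shown): the implementation drives childClock/childReset from its own ports and opts in to providing them as the implicit clock and reset of its lazy children, which is the same mechanism Domain.Impl in ClockDomain.scala uses with its clockBundle.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
class RawClockIsland(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    // Returning true makes instantiate() run under withClockAndReset(childClock, childReset).
    override def provideImplicitClockToLazyChildren = true
    val io = IO(new Bundle {
      val clock = Input(Clock())
      val reset = Input(AsyncReset())
    })
    childClock := io.clock
    childReset := io.reset
  }
}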
File NoC.scala:
package constellation.noc
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, BundleBridgeSink, InModuleBody}
import freechips.rocketchip.util.ElaborationArtefacts
import freechips.rocketchip.prci._
import constellation.router._
import constellation.channel._
import constellation.routing.{RoutingRelation, ChannelRoutingInfo}
import constellation.topology.{PhysicalTopology, UnidirectionalLine}
class NoCTerminalIO(
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle {
val ingress = MixedVec(ingressParams.map { u => Flipped(new IngressChannel(u)) })
val egress = MixedVec(egressParams.map { u => new EgressChannel(u) })
}
class NoC(nocParams: NoCParams)(implicit p: Parameters) extends LazyModule {
override def shouldBeInlined = nocParams.inlineNoC
val internalParams = InternalNoCParams(nocParams)
val allChannelParams = internalParams.channelParams
val allIngressParams = internalParams.ingressParams
val allEgressParams = internalParams.egressParams
val allRouterParams = internalParams.routerParams
val iP = p.alterPartial({ case InternalNoCKey => internalParams })
val nNodes = nocParams.topology.nNodes
val nocName = nocParams.nocName
val skipValidationChecks = nocParams.skipValidationChecks
val clockSourceNodes = Seq.tabulate(nNodes) { i => ClockSourceNode(Seq(ClockSourceParameters())) }
val router_sink_domains = Seq.tabulate(nNodes) { i =>
val router_sink_domain = LazyModule(new ClockSinkDomain(ClockSinkParameters(
name = Some(s"${nocName}_router_$i")
)))
router_sink_domain.clockNode := clockSourceNodes(i)
router_sink_domain
}
val routers = Seq.tabulate(nNodes) { i => router_sink_domains(i) {
val inParams = allChannelParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val outParams = allChannelParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val ingressParams = allIngressParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val egressParams = allEgressParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val noIn = inParams.size + ingressParams.size == 0
val noOut = outParams.size + egressParams.size == 0
if (noIn || noOut) {
println(s"Constellation WARNING: $nocName router $i seems to be unused, it will not be generated")
None
} else {
Some(LazyModule(new Router(
routerParams = allRouterParams(i),
preDiplomaticInParams = inParams,
preDiplomaticIngressParams = ingressParams,
outDests = outParams.map(_.destId),
egressIds = egressParams.map(_.egressId)
)(iP)))
}
}}.flatten
val ingressNodes = allIngressParams.map { u => IngressChannelSourceNode(u.destId) }
val egressNodes = allEgressParams.map { u => EgressChannelDestNode(u) }
// Generate channels between routers diplomatically
Seq.tabulate(nNodes, nNodes) { case (i, j) => if (i != j) {
val routerI = routers.find(_.nodeId == i)
val routerJ = routers.find(_.nodeId == j)
if (routerI.isDefined && routerJ.isDefined) {
val sourceNodes: Seq[ChannelSourceNode] = routerI.get.sourceNodes.filter(_.destId == j)
val destNodes: Seq[ChannelDestNode] = routerJ.get.destNodes.filter(_.destParams.srcId == i)
require (sourceNodes.size == destNodes.size)
(sourceNodes zip destNodes).foreach { case (src, dst) =>
val channelParam = allChannelParams.find(c => c.srcId == i && c.destId == j).get
router_sink_domains(j) {
implicit val p: Parameters = iP
(dst
:= ChannelWidthWidget(routerJ.get.payloadBits, routerI.get.payloadBits)
:= channelParam.channelGen(p)(src)
)
}
}
}
}}
// Generate terminal channels diplomatically
routers.foreach { dst => router_sink_domains(dst.nodeId) {
implicit val p: Parameters = iP
dst.ingressNodes.foreach(n => {
val ingressId = n.destParams.ingressId
require(dst.payloadBits <= allIngressParams(ingressId).payloadBits)
(n
:= IngressWidthWidget(dst.payloadBits, allIngressParams(ingressId).payloadBits)
:= ingressNodes(ingressId)
)
})
dst.egressNodes.foreach(n => {
val egressId = n.egressId
require(dst.payloadBits <= allEgressParams(egressId).payloadBits)
(egressNodes(egressId)
:= EgressWidthWidget(allEgressParams(egressId).payloadBits, dst.payloadBits)
:= n
)
})
}}
val debugNodes = routers.map { r =>
val sink = BundleBridgeSink[DebugBundle]()
sink := r.debugNode
sink
}
val ctrlNodes = if (nocParams.hasCtrl) {
(0 until nNodes).map { i =>
routers.find(_.nodeId == i).map { r =>
val sink = BundleBridgeSink[RouterCtrlBundle]()
sink := r.ctrlNode.get
sink
}
}
} else {
Nil
}
println(s"Constellation: $nocName Finished parameter validation")
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
println(s"Constellation: $nocName Starting NoC RTL generation")
val io = IO(new NoCTerminalIO(allIngressParams, allEgressParams)(iP) {
val router_clocks = Vec(nNodes, Input(new ClockBundle(ClockBundleParameters())))
val router_ctrl = if (nocParams.hasCtrl) Vec(nNodes, new RouterCtrlBundle) else Nil
})
(io.ingress zip ingressNodes.map(_.out(0)._1)).foreach { case (l,r) => r <> l }
(io.egress zip egressNodes .map(_.in (0)._1)).foreach { case (l,r) => l <> r }
(io.router_clocks zip clockSourceNodes.map(_.out(0)._1)).foreach { case (l,r) => l <> r }
if (nocParams.hasCtrl) {
ctrlNodes.zipWithIndex.map { case (c,i) =>
if (c.isDefined) {
io.router_ctrl(i) <> c.get.in(0)._1
} else {
io.router_ctrl(i) <> DontCare
}
}
}
// TODO: These assume a single clock-domain across the entire noc
val debug_va_stall_ctr = RegInit(0.U(64.W))
val debug_sa_stall_ctr = RegInit(0.U(64.W))
val debug_any_stall_ctr = debug_va_stall_ctr + debug_sa_stall_ctr
debug_va_stall_ctr := debug_va_stall_ctr + debugNodes.map(_.in(0)._1.va_stall.reduce(_+_)).reduce(_+_)
debug_sa_stall_ctr := debug_sa_stall_ctr + debugNodes.map(_.in(0)._1.sa_stall.reduce(_+_)).reduce(_+_)
dontTouch(debug_va_stall_ctr)
dontTouch(debug_sa_stall_ctr)
dontTouch(debug_any_stall_ctr)
def prepend(s: String) = Seq(nocName, s).mkString(".")
ElaborationArtefacts.add(prepend("noc.graphml"), graphML)
val adjList = routers.map { r =>
val outs = r.outParams.map(o => s"${o.destId}").mkString(" ")
val egresses = r.egressParams.map(e => s"e${e.egressId}").mkString(" ")
val ingresses = r.ingressParams.map(i => s"i${i.ingressId} ${r.nodeId}")
(Seq(s"${r.nodeId} $outs $egresses") ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.adjlist"), adjList)
val xys = routers.map(r => {
val n = r.nodeId
val ids = (Seq(r.nodeId.toString)
++ r.egressParams.map(e => s"e${e.egressId}")
++ r.ingressParams.map(i => s"i${i.ingressId}")
)
val plotter = nocParams.topology.plotter
val coords = (Seq(plotter.node(r.nodeId))
++ Seq.tabulate(r.egressParams.size ) { i => plotter. egress(i, r. egressParams.size, r.nodeId) }
++ Seq.tabulate(r.ingressParams.size) { i => plotter.ingress(i, r.ingressParams.size, r.nodeId) }
)
(ids zip coords).map { case (i, (x, y)) => s"$i $x $y" }.mkString("\n")
}).mkString("\n")
ElaborationArtefacts.add(prepend("noc.xy"), xys)
val edgeProps = routers.map { r =>
val outs = r.outParams.map { o =>
(Seq(s"${r.nodeId} ${o.destId}") ++ (if (o.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val egresses = r.egressParams.map { e =>
(Seq(s"${r.nodeId} e${e.egressId}") ++ (if (e.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val ingresses = r.ingressParams.map { i =>
(Seq(s"i${i.ingressId} ${r.nodeId}") ++ (if (i.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
(outs ++ egresses ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.edgeprops"), edgeProps)
println(s"Constellation: $nocName Finished NoC RTL generation")
}
}
| module TLNoC_router_1ClockSinkDomain( // @[ClockDomain.scala:14:9]
output [2:0] auto_routers_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_va_stall_3, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_debug_out_sa_stall_3, // @[LazyModuleImp.scala:107:25]
input auto_routers_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_routers_egress_nodes_out_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_routers_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_routers_egress_nodes_out_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_routers_ingress_nodes_in_2_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_2_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_2_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_ingress_nodes_in_2_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_routers_ingress_nodes_in_2_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_routers_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_routers_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_routers_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_routers_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_routers_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_routers_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_routers_dest_nodes_in_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_reset // @[LazyModuleImp.scala:107:25]
);
Router_1 routers ( // @[NoC.scala:67:22]
.clock (auto_clock_in_clock),
.reset (auto_clock_in_reset),
.auto_debug_out_va_stall_0 (auto_routers_debug_out_va_stall_0),
.auto_debug_out_va_stall_1 (auto_routers_debug_out_va_stall_1),
.auto_debug_out_va_stall_2 (auto_routers_debug_out_va_stall_2),
.auto_debug_out_va_stall_3 (auto_routers_debug_out_va_stall_3),
.auto_debug_out_sa_stall_0 (auto_routers_debug_out_sa_stall_0),
.auto_debug_out_sa_stall_1 (auto_routers_debug_out_sa_stall_1),
.auto_debug_out_sa_stall_2 (auto_routers_debug_out_sa_stall_2),
.auto_debug_out_sa_stall_3 (auto_routers_debug_out_sa_stall_3),
.auto_egress_nodes_out_1_flit_ready (auto_routers_egress_nodes_out_1_flit_ready),
.auto_egress_nodes_out_1_flit_valid (auto_routers_egress_nodes_out_1_flit_valid),
.auto_egress_nodes_out_1_flit_bits_head (auto_routers_egress_nodes_out_1_flit_bits_head),
.auto_egress_nodes_out_1_flit_bits_tail (auto_routers_egress_nodes_out_1_flit_bits_tail),
.auto_egress_nodes_out_1_flit_bits_payload (auto_routers_egress_nodes_out_1_flit_bits_payload),
.auto_egress_nodes_out_0_flit_ready (auto_routers_egress_nodes_out_0_flit_ready),
.auto_egress_nodes_out_0_flit_valid (auto_routers_egress_nodes_out_0_flit_valid),
.auto_egress_nodes_out_0_flit_bits_head (auto_routers_egress_nodes_out_0_flit_bits_head),
.auto_egress_nodes_out_0_flit_bits_tail (auto_routers_egress_nodes_out_0_flit_bits_tail),
.auto_egress_nodes_out_0_flit_bits_payload (auto_routers_egress_nodes_out_0_flit_bits_payload),
.auto_ingress_nodes_in_2_flit_ready (auto_routers_ingress_nodes_in_2_flit_ready),
.auto_ingress_nodes_in_2_flit_valid (auto_routers_ingress_nodes_in_2_flit_valid),
.auto_ingress_nodes_in_2_flit_bits_head (auto_routers_ingress_nodes_in_2_flit_bits_head),
.auto_ingress_nodes_in_2_flit_bits_payload (auto_routers_ingress_nodes_in_2_flit_bits_payload),
.auto_ingress_nodes_in_2_flit_bits_egress_id (auto_routers_ingress_nodes_in_2_flit_bits_egress_id),
.auto_ingress_nodes_in_1_flit_ready (auto_routers_ingress_nodes_in_1_flit_ready),
.auto_ingress_nodes_in_1_flit_valid (auto_routers_ingress_nodes_in_1_flit_valid),
.auto_ingress_nodes_in_1_flit_bits_head (auto_routers_ingress_nodes_in_1_flit_bits_head),
.auto_ingress_nodes_in_1_flit_bits_tail (auto_routers_ingress_nodes_in_1_flit_bits_tail),
.auto_ingress_nodes_in_1_flit_bits_payload (auto_routers_ingress_nodes_in_1_flit_bits_payload),
.auto_ingress_nodes_in_1_flit_bits_egress_id (auto_routers_ingress_nodes_in_1_flit_bits_egress_id),
.auto_ingress_nodes_in_0_flit_ready (auto_routers_ingress_nodes_in_0_flit_ready),
.auto_ingress_nodes_in_0_flit_valid (auto_routers_ingress_nodes_in_0_flit_valid),
.auto_ingress_nodes_in_0_flit_bits_head (auto_routers_ingress_nodes_in_0_flit_bits_head),
.auto_ingress_nodes_in_0_flit_bits_tail (auto_routers_ingress_nodes_in_0_flit_bits_tail),
.auto_ingress_nodes_in_0_flit_bits_payload (auto_routers_ingress_nodes_in_0_flit_bits_payload),
.auto_ingress_nodes_in_0_flit_bits_egress_id (auto_routers_ingress_nodes_in_0_flit_bits_egress_id),
.auto_source_nodes_out_flit_0_valid (auto_routers_source_nodes_out_flit_0_valid),
.auto_source_nodes_out_flit_0_bits_head (auto_routers_source_nodes_out_flit_0_bits_head),
.auto_source_nodes_out_flit_0_bits_tail (auto_routers_source_nodes_out_flit_0_bits_tail),
.auto_source_nodes_out_flit_0_bits_payload (auto_routers_source_nodes_out_flit_0_bits_payload),
.auto_source_nodes_out_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id),
.auto_source_nodes_out_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node),
.auto_source_nodes_out_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id),
.auto_source_nodes_out_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node),
.auto_source_nodes_out_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id),
.auto_source_nodes_out_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_flit_0_bits_virt_channel_id),
.auto_source_nodes_out_credit_return (auto_routers_source_nodes_out_credit_return),
.auto_source_nodes_out_vc_free (auto_routers_source_nodes_out_vc_free),
.auto_dest_nodes_in_flit_0_valid (auto_routers_dest_nodes_in_flit_0_valid),
.auto_dest_nodes_in_flit_0_bits_head (auto_routers_dest_nodes_in_flit_0_bits_head),
.auto_dest_nodes_in_flit_0_bits_tail (auto_routers_dest_nodes_in_flit_0_bits_tail),
.auto_dest_nodes_in_flit_0_bits_payload (auto_routers_dest_nodes_in_flit_0_bits_payload),
.auto_dest_nodes_in_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id),
.auto_dest_nodes_in_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node),
.auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id),
.auto_dest_nodes_in_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node),
.auto_dest_nodes_in_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id),
.auto_dest_nodes_in_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id),
.auto_dest_nodes_in_credit_return (auto_routers_dest_nodes_in_credit_return),
.auto_dest_nodes_in_vc_free (auto_routers_dest_nodes_in_vc_free)
); // @[NoC.scala:67:22]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
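A hypothetical usage sketch (PipeExample is not part of the file above; it only relies on the freechips.rocketchip.util helpers defined here): ShiftRegInit builds named, reset-initialized pipeline stages inline, while AsyncResetShiftReg wraps the chain into a dedicated module so backend flows can replace or constrain it.
import chisel3._
import freechips.rocketchip.util.{ShiftRegInit, AsyncResetShiftReg}
class PipeExample extends Module {
  val io = IO(new Bundle {
    val in        = Input(Bool())
    val out       = Output(Bool())
    val out_async = Output(Bool())
  })
  // Two synchronously reset stages, reset value false, suggested names pipe_0/pipe_1.
  io.out := ShiftRegInit(io.in, 2, false.B, Some("pipe"))
  // Three asynchronously reset stages built from AsyncResetRegVec instances, init value 0.
  io.out_async := AsyncResetShiftReg(io.in, 3, 0, Some("cdc_pipe"))
}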
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
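A hypothetical CDC sketch (IrqSynchronizer is not part of the file above) using the asynchronously reset variant, which is the usual choice for bringing a single asynchronous level into the local clock domain:
import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg
class IrqSynchronizer extends Module {
  val io = IO(new Bundle {
    val async_irq = Input(Bool())  // level generated in another clock domain
    val irq       = Output(Bool()) // same level, synchronized to this module's clock
  })
  // Three synchronizer stages, asynchronously reset to 0.
  io.irq := AsyncResetSynchronizerShiftReg(io.async_irq, 3, 0)
}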
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
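And a hypothetical single-register crossing (BusCapture is not from the file above; treating the enable as already synchronized into this clock domain is an assumption of the sketch, not something the file states):
import chisel3._
import freechips.rocketchip.util.ClockCrossingReg
class BusCapture extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(8.W))
    val en = Input(Bool())
    val q  = Output(UInt(8.W))
  })
  // Single-deep capture register with a reset-time init value of 0, suggested name cdc_data.
  io.q := ClockCrossingReg(io.d, io.en, doInit = true, name = Some("cdc_data"))
}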
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_228( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_416 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and its
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
class IntXbar()(implicit p: Parameters) extends LazyModule
{
val intnode = new IntNexusNode(
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
sourceFn = { seq =>
IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map {
case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o)))
}.flatten)
})
{
override def circuitIdentity = outputs == 1 && inputs == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
override def desiredName = s"IntXbar_i${intnode.in.size}_o${intnode.out.size}"
val cat = intnode.in.map { case (i, e) => i.take(e.source.num) }.flatten
intnode.out.foreach { case (o, _) => o := cat }
}
}
class IntSyncXbar()(implicit p: Parameters) extends LazyModule
{
val intnode = new IntSyncNexusNode(
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
sourceFn = { seq =>
IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map {
case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o)))
}.flatten)
})
{
override def circuitIdentity = outputs == 1 && inputs == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncXbar_i${intnode.in.size}_o${intnode.out.size}"
val cat = intnode.in.map { case (i, e) => i.sync.take(e.source.num) }.flatten
intnode.out.foreach { case (o, _) => o.sync := cat }
}
}
object IntXbar {
def apply()(implicit p: Parameters): IntNode = {
val xbar = LazyModule(new IntXbar)
xbar.intnode
}
}
object IntSyncXbar {
def apply()(implicit p: Parameters): IntSyncNode = {
val xbar = LazyModule(new IntSyncXbar)
xbar.intnode
}
}
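A hypothetical fan-in sketch (IntFanIn and its placeholder source/sink nodes are assumptions, not part of the file above) showing how the IntXbar convenience apply is wired: each := into the nexus adds an input edge, and the scanLeft in sourceFn offsets the source ranges so interrupt numbers do not collide on the single concatenated output.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.interrupts._
class IntFanIn(implicit p: Parameters) extends LazyModule {
  val srcA = IntSourceNode(IntSourcePortSimple(num = 1)) // placeholder 1-interrupt sources
  val srcB = IntSourceNode(IntSourcePortSimple(num = 1))
  val sink = IntSinkNode(IntSinkPortSimple())
  val xbar = IntXbar()  // builds the crossbar LazyModule and returns its nexus node
  xbar := srcA
  xbar := srcB
  sink := xbar          // single output carrying the concatenated interrupt vector
  lazy val module = new LazyModuleImp(this) {
    srcA.out.head._1(0) := false.B // placeholder drivers for the assumed sources
    srcB.out.head._1(0) := false.B
    // sink.in.head._1 is the combined Vec[Bool] of both interrupts
  }
}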
| module IntXbar_i0_o0_3(); // @[Xbar.scala:22:9]
wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RouteComputer.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.experimental.decode.{TruthTable, decoder}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import freechips.rocketchip.rocket.DecodeLogic
import constellation.channel._
import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo}
import constellation.noc.{HasNoCParams}
class RouteComputerReq(implicit val p: Parameters) extends Bundle with HasNoCParams {
val src_virt_id = UInt(virtualChannelBits.W)
val flow = new FlowRoutingBundle
}
class RouteComputerResp(
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle
with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
class RouteComputer(
val routerParams: RouterParams,
val inParams: Seq[ChannelParams],
val outParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams
with HasNoCParams {
val io = IO(new Bundle {
val req = MixedVec(allInParams.map { u => Flipped(Decoupled(new RouteComputerReq)) })
val resp = MixedVec(allInParams.map { u => Output(new RouteComputerResp(outParams, egressParams)) })
})
(io.req zip io.resp).zipWithIndex.map { case ((req, resp), i) =>
req.ready := true.B
if (outParams.size == 0) {
assert(!req.valid)
resp.vc_sel := DontCare
} else {
def toUInt(t: (Int, FlowRoutingInfo)): UInt = {
val l2 = (BigInt(t._1) << req.bits.flow.vnet_id .getWidth) | t._2.vNetId
val l3 = ( l2 << req.bits.flow.ingress_node .getWidth) | t._2.ingressNode
val l4 = ( l3 << req.bits.flow.ingress_node_id.getWidth) | t._2.ingressNodeId
val l5 = ( l4 << req.bits.flow.egress_node .getWidth) | t._2.egressNode
val l6 = ( l5 << req.bits.flow.egress_node_id .getWidth) | t._2.egressNodeId
l6.U(req.bits.getWidth.W)
}
val flow = req.bits.flow
val table = allInParams(i).possibleFlows.toSeq.distinct.map { pI =>
allInParams(i).channelRoutingInfos.map { cI =>
var row: String = "b"
(0 until nOutputs).foreach { o =>
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
row = row + (if (routingRelation(cI, outParams(o).channelRoutingInfos(outVId), pI)) "1" else "0")
}
}
((cI.vc, pI), row)
}
}.flatten
val addr = req.bits.asUInt
val width = outParams.map(_.nVirtualChannels).reduce(_+_)
val decoded = if (table.size > 0) {
val truthTable = TruthTable(
table.map { e => (BitPat(toUInt(e._1)), BitPat(e._2)) },
BitPat("b" + "?" * width)
)
Reverse(decoder(addr, truthTable))
} else {
0.U(width.W)
}
var idx = 0
(0 until nAllOutputs).foreach { o =>
if (o < nOutputs) {
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
resp.vc_sel(o)(outVId) := decoded(idx)
idx += 1
}
} else {
resp.vc_sel(o)(0) := false.B
}
}
}
}
}
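The TruthTable/decoder pattern above, reduced to a minimal standalone sketch (TinyDecoder and its table are made up for illustration; only chisel3.util.experimental.decode is assumed): each BitPat row maps a packed input pattern to an output row, and unmatched inputs fall through to the don't-care default, just as RouteComputer maps the packed request bits to a row of virtual-channel selects.
import chisel3._
import chisel3.util.BitPat
import chisel3.util.experimental.decode.{TruthTable, decoder}
class TinyDecoder extends Module {
  val io = IO(new Bundle {
    val addr = Input(UInt(2.W))
    val sel  = Output(UInt(3.W))
  })
  val table = TruthTable(
    Seq(
      BitPat("b00") -> BitPat("b001"),
      BitPat("b01") -> BitPat("b010"),
      BitPat("b10") -> BitPat("b100")
    ),
    BitPat("b???") // default row for unmatched inputs
  )
  io.sel := decoder(io.addr, table)
}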
| module RouteComputer_21( // @[RouteComputer.scala:29:7]
input [4:0] io_req_1_bits_src_virt_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_1_bits_flow_vnet_id, // @[RouteComputer.scala:40:14]
input [5:0] io_req_1_bits_flow_ingress_node, // @[RouteComputer.scala:40:14]
input [2:0] io_req_1_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14]
input [5:0] io_req_1_bits_flow_egress_node, // @[RouteComputer.scala:40:14]
input [2:0] io_req_1_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14]
input [4:0] io_req_0_bits_src_virt_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_0_bits_flow_vnet_id, // @[RouteComputer.scala:40:14]
input [5:0] io_req_0_bits_flow_ingress_node, // @[RouteComputer.scala:40:14]
input [2:0] io_req_0_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14]
input [5:0] io_req_0_bits_flow_egress_node, // @[RouteComputer.scala:40:14]
input [2:0] io_req_0_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_2, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_3, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_8, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_9, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_10, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_11, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_12, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_13, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_14, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_15, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_16, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_17, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_18, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_19, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_20, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_21, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_2, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_3, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_10, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_11, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_14, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_15, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_18, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_19, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_20, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_21, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_1_9, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_1_13, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_1_17, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_1_20, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_1_21 // @[RouteComputer.scala:40:14]
);
wire [2:0] decoded_invInputs = ~io_req_0_bits_flow_ingress_node_id; // @[pla.scala:78:21]
wire [1:0] _decoded_andMatrixOutputs_T = {decoded_invInputs[0], decoded_invInputs[2]}; // @[pla.scala:78:21, :91:29, :98:53]
wire [26:0] decoded_invInputs_1 = ~{io_req_1_bits_src_virt_id, io_req_1_bits_flow_vnet_id, io_req_1_bits_flow_ingress_node, io_req_1_bits_flow_ingress_node_id, io_req_1_bits_flow_egress_node, io_req_1_bits_flow_egress_node_id}; // @[pla.scala:78:21]
wire [2:0] _decoded_andMatrixOutputs_T_13 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node_id[1], io_req_1_bits_flow_egress_node[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [1:0] _decoded_andMatrixOutputs_T_14 = {io_req_1_bits_flow_egress_node_id[2], io_req_1_bits_flow_egress_node[1]}; // @[pla.scala:90:45, :98:53]
wire [2:0] _decoded_andMatrixOutputs_T_15 = {io_req_1_bits_flow_egress_node_id[0], io_req_1_bits_flow_egress_node_id[1], io_req_1_bits_flow_egress_node[2]}; // @[pla.scala:90:45, :98:53]
wire [3:0] _decoded_andMatrixOutputs_T_24 = {io_req_1_bits_flow_egress_node_id[0], decoded_invInputs_1[1], io_req_1_bits_flow_egress_node[2], io_req_1_bits_flow_ingress_node[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decoded_andMatrixOutputs_T_34 = {io_req_1_bits_flow_egress_node_id[0], decoded_invInputs_1[5], decoded_invInputs_1[20], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decoded_andMatrixOutputs_T_35 = {io_req_1_bits_flow_egress_node[0], decoded_invInputs_1[10], decoded_invInputs_1[19], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decoded_andMatrixOutputs_T_36 = {io_req_1_bits_flow_egress_node_id[2], decoded_invInputs_1[4], io_req_1_bits_flow_vnet_id[0], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [3:0] _decoded_andMatrixOutputs_T_37 = {decoded_invInputs_1[18], io_req_1_bits_flow_vnet_id[1], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decoded_andMatrixOutputs_T_38 = {io_req_1_bits_flow_egress_node_id[1], decoded_invInputs_1[4], io_req_1_bits_flow_vnet_id[1], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [3:0] _decoded_andMatrixOutputs_T_39 = {decoded_invInputs_1[1], io_req_1_bits_flow_vnet_id[3], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decoded_andMatrixOutputs_T_40 = {io_req_1_bits_flow_egress_node_id[1], decoded_invInputs_1[4], io_req_1_bits_flow_vnet_id[3], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
assign io_resp_1_vc_sel_1_2 = |{&{io_req_1_bits_flow_egress_node_id[0], decoded_invInputs_1[5], decoded_invInputs_1[20], decoded_invInputs_1[22], decoded_invInputs_1[24]}, &{io_req_1_bits_flow_egress_node_id[0], decoded_invInputs_1[5], decoded_invInputs_1[20], decoded_invInputs_1[22], decoded_invInputs_1[26]}, &{io_req_1_bits_flow_egress_node_id[0], decoded_invInputs_1[1], decoded_invInputs_1[3], decoded_invInputs_1[4], io_req_1_bits_flow_egress_node[3]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_3 = |{&{io_req_1_bits_flow_egress_node_id[0], decoded_invInputs_1[1], decoded_invInputs_1[5], io_req_1_bits_flow_ingress_node[0], io_req_1_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_34}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_8 = &{decoded_invInputs_1[2], io_req_1_bits_flow_egress_node[0], decoded_invInputs_1[4], decoded_invInputs_1[5]}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}]
assign io_resp_1_vc_sel_1_9 = |{&{decoded_invInputs_1[2], io_req_1_bits_flow_egress_node[0], decoded_invInputs_1[4], decoded_invInputs_1[5], io_req_1_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_35}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_10 = |{&{io_req_1_bits_flow_egress_node_id[2], decoded_invInputs_1[3], decoded_invInputs_1[4], decoded_invInputs_1[22], decoded_invInputs_1[24]}, &{io_req_1_bits_flow_egress_node_id[2], decoded_invInputs_1[3], decoded_invInputs_1[4], decoded_invInputs_1[22], decoded_invInputs_1[26]}, &{io_req_1_bits_flow_egress_node_id[2], decoded_invInputs_1[3], io_req_1_bits_flow_egress_node[3]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_11 = |{&{io_req_1_bits_flow_egress_node_id[2], decoded_invInputs_1[3], decoded_invInputs_1[4], io_req_1_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_36}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_12 = |{&{io_req_1_bits_flow_egress_node_id[0], decoded_invInputs_1[1], decoded_invInputs_1[6], decoded_invInputs_1[12]}, &{io_req_1_bits_flow_egress_node_id[0], decoded_invInputs_1[18], decoded_invInputs_1[22], decoded_invInputs_1[26]}, &{io_req_1_bits_flow_egress_node_id[0], io_req_1_bits_flow_egress_node[0], io_req_1_bits_flow_egress_node[1], io_req_1_bits_flow_egress_node[2]}, &{decoded_invInputs_1[1], decoded_invInputs_1[4], decoded_invInputs_1[5], io_req_1_bits_flow_vnet_id[1]}, &{decoded_invInputs_1[18], io_req_1_bits_flow_vnet_id[1], decoded_invInputs_1[22], decoded_invInputs_1[24]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_13 = |{&{decoded_invInputs_1[18], io_req_1_bits_flow_vnet_id[1], io_req_1_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_37}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_14 = |{&{io_req_1_bits_flow_egress_node_id[0], io_req_1_bits_flow_egress_node_id[1], decoded_invInputs_1[4], decoded_invInputs_1[22], decoded_invInputs_1[24]}, &{io_req_1_bits_flow_egress_node_id[0], io_req_1_bits_flow_egress_node_id[1], decoded_invInputs_1[4], decoded_invInputs_1[22], decoded_invInputs_1[26]}, &{io_req_1_bits_flow_egress_node_id[0], io_req_1_bits_flow_egress_node_id[1], io_req_1_bits_flow_egress_node[3]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_15 = |{&{io_req_1_bits_flow_egress_node_id[0], io_req_1_bits_flow_egress_node_id[1], decoded_invInputs_1[4], io_req_1_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_38}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_16 = |{&{decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[2], decoded_invInputs_1[6]}, &{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[0], io_req_1_bits_flow_egress_node[1], io_req_1_bits_flow_egress_node[2]}, &{decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[2], io_req_1_bits_flow_ingress_node_id[1], decoded_invInputs_1[22], decoded_invInputs_1[24]}, &{decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[2], io_req_1_bits_flow_ingress_node_id[1], decoded_invInputs_1[22], decoded_invInputs_1[26]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_17 = |{&{io_req_1_bits_flow_ingress_node_id[1], decoded_invInputs_1[18], decoded_invInputs_1[19], io_req_1_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_39}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_18 = |{&{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node_id[1], decoded_invInputs_1[4], decoded_invInputs_1[22], decoded_invInputs_1[24]}, &{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node_id[1], decoded_invInputs_1[4], decoded_invInputs_1[22], decoded_invInputs_1[26]}, &{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node_id[1], io_req_1_bits_flow_egress_node[3]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_19 = |{&{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node_id[1], decoded_invInputs_1[4], io_req_1_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_40}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_20 = |{&_decoded_andMatrixOutputs_T_34, &_decoded_andMatrixOutputs_T_35, &_decoded_andMatrixOutputs_T_36, &_decoded_andMatrixOutputs_T_37, &_decoded_andMatrixOutputs_T_38, &_decoded_andMatrixOutputs_T_39, &_decoded_andMatrixOutputs_T_40}; // @[pla.scala:98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_21 = |{&{decoded_invInputs_1[5], io_req_1_bits_src_virt_id[0], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}, &{io_req_1_bits_flow_egress_node[3], io_req_1_bits_src_virt_id[0], io_req_1_bits_src_virt_id[2], io_req_1_bits_src_virt_id[4]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_0_2 = &_decoded_andMatrixOutputs_T_24; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_3 = &_decoded_andMatrixOutputs_T_24; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_10 = &_decoded_andMatrixOutputs_T_14; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_11 = &_decoded_andMatrixOutputs_T_14; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_14 = &_decoded_andMatrixOutputs_T_15; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_15 = &_decoded_andMatrixOutputs_T_15; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_18 = &_decoded_andMatrixOutputs_T_13; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_19 = &_decoded_andMatrixOutputs_T_13; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_20 = |{&_decoded_andMatrixOutputs_T_13, &_decoded_andMatrixOutputs_T_14, &_decoded_andMatrixOutputs_T_15, &_decoded_andMatrixOutputs_T_24}; // @[pla.scala:98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_0_21 = |{&_decoded_andMatrixOutputs_T_13, &_decoded_andMatrixOutputs_T_14, &_decoded_andMatrixOutputs_T_15, &_decoded_andMatrixOutputs_T_24}; // @[pla.scala:98:{53,70}, :114:{19,36}]
assign io_resp_0_vc_sel_1_9 = &_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}]
assign io_resp_0_vc_sel_1_13 = io_req_0_bits_flow_ingress_node_id[2]; // @[pla.scala:90:45]
assign io_resp_0_vc_sel_1_17 = io_req_0_bits_flow_ingress_node_id[0]; // @[pla.scala:90:45]
assign io_resp_0_vc_sel_1_20 = |{&_decoded_andMatrixOutputs_T, io_req_0_bits_flow_ingress_node_id[0], io_req_0_bits_flow_ingress_node_id[2]}; // @[pla.scala:90:45, :98:{53,70}, :114:{19,36}]
assign io_resp_0_vc_sel_1_21 = |{&_decoded_andMatrixOutputs_T, io_req_0_bits_flow_ingress_node_id[0], io_req_0_bits_flow_ingress_node_id[2]}; // @[pla.scala:90:45, :98:{53,70}, :114:{19,36}]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
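    // Illustrative examples (using the implicit methods above; n = 12 is a non-power-of-2 modulus):
    //   9.U.addWrap(7.U, 12)  // == 4.U, since (9 + 7) % 12 == 4
    //   3.U.subWrap(7.U, 12)  // == 8.U, since (3 - 7) mod 12 == 8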
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
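  // OH1 is a thermometer-style encoding of a count: the value n is represented by n low-order 1s.
  // Illustrative values (widths chosen for the example):
  //   UIntToOH1(3.U, 4)     // == "b0111".U
  //   OH1ToOH("b0111".U)    // == "b1000".U
  //   OH1ToUInt("b0111".U)  // == 3.U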
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
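  // For example, with a 5-bit input:
  //   leftOR("b00100".U(5.W))   // == "b11100".U  (set bits propagate toward the MSB)
  //   rightOR("b00100".U(5.W))  // == "b00111".U  (set bits propagate toward the LSB)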
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
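  // For example, grouping by parity preserves the first-appearance order of the keys:
  //   groupByIntoSeq(Seq(1, 2, 3, 4, 5, 6))(_ % 2)  // == Seq(1 -> Seq(1, 3, 5), 0 -> Seq(2, 4, 6))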
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_205( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
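// A minimal usage sketch of the AsyncResetShiftReg wrapper below (signal and name choices are illustrative):
//   val piped = AsyncResetShiftReg(io.d, depth = 2, name = Some("pipe"))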
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
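// A typical CDC use of the AsyncResetSynchronizerShiftReg wrapper below (illustrative;
// `bitFromOtherDomain` is assumed to be a signal launched from another clock domain):
//   val syncBit = AsyncResetSynchronizerShiftReg(bitFromOtherDomain, sync = 3, init = 0)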
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_119( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
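  // For example, folding an 8-bit value down to 4 bits XORs its two nibbles:
  //   Fold("hA5".U(8.W), compressedLength = 4, fullLength = 8)  // == "hF".U  (0xA ^ 0x5)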
}
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
 * Object to add an amount to an input value, wrapping the result if
 * necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
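  // For example:
  //   WrapAdd(7.U, 5.U, 8)   // == 4.U  (power-of-2: 12 truncated to 3 bits)
  //   WrapAdd(7.U, 5.U, 10)  // == 2.U  (non-power-of-2: 12 >= 10, so 12 - 10)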
}
/**
 * Object to subtract an amount from an input value, wrapping the result if
 * necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
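  // For example, WrapInc(6.U, 7) == 0.U, since 6 is the last index of a 7-entry structure.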
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
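  // For example, WrapDec(0.U, 7) == 6.U, wrapping back to the last index of a 7-entry structure.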
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert for the scenario where pc is wider than b
    //   (which would otherwise clear all bits above size(b)).
~(~pc | (b-1).U)
}
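  // For example, aligning to a 64-byte boundary clears the low log2(64) = 6 bits:
  //   AlignPCToBoundary("h12345".U(40.W), 64)  // == "h12340".U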
}
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
 * Asking for U-type gives the immediate shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
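  // For example, with 8 entries, valid bits set at indices 1 and 5, and head = 3.U,
  // the encoder returns 5.U (the first valid index at or after head); with head = 6.U
  // it wraps and returns 1.U, the lowest valid index.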
}
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
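  // For example, with 8 entries and head treated as the oldest position, head = 3 gives
  // the age order 3, 4, 5, 6, 7, 0, 1, 2 (oldest first), so:
  //   IsOlder(5.U, 1.U, 3.U)  // true:  index 5 was allocated before index 1
  //   IsOlder(1.U, 5.U, 3.U)  // false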
}
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
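  // For example, MaskLower("b0100".U(4.W)) == "b0111".U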
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
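  // For example, MaskUpper("b0100".U(4.W)) == "b1100".U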
}
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
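  // For example, selecting the first two set bits of a 4-bit input:
  //   SelectFirstN("b1011".U(4.W), 2)  // sels(0) == "b0001".U, sels(1) == "b0010".U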
}
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
File issue-slot.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v4.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v4.common._
import boom.v4.util._
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
  val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
val request = Output(Bool())
val grant = Input(Bool())
val iss_uop = Output(new MicroOp())
val in_uop = Input(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
val out_uop = Output(new MicroOp())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val squash_grant = Input(Bool())
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new Wakeup)))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val child_rebusys = Input(UInt(aluWidth.W))
}
class IssueSlot(val numWakeupPorts: Int, val isMem: Boolean, val isFp: Boolean)(implicit p: Parameters)
extends BoomModule
{
val io = IO(new IssueSlotIO(numWakeupPorts))
val slot_valid = RegInit(false.B)
val slot_uop = Reg(new MicroOp())
val next_valid = WireInit(slot_valid)
val next_uop = WireInit(UpdateBrMask(io.brupdate, slot_uop))
val killed = IsKilledByBranch(io.brupdate, io.kill, slot_uop)
io.valid := slot_valid
io.out_uop := next_uop
io.will_be_valid := next_valid && !killed
when (io.kill) {
slot_valid := false.B
} .elsewhen (io.in_uop.valid) {
slot_valid := true.B
} .elsewhen (io.clear) {
slot_valid := false.B
} .otherwise {
slot_valid := next_valid && !killed
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (!slot_valid || io.clear || io.kill)
} .otherwise {
slot_uop := next_uop
}
// Wakeups
next_uop.iw_p1_bypass_hint := false.B
next_uop.iw_p2_bypass_hint := false.B
next_uop.iw_p3_bypass_hint := false.B
next_uop.iw_p1_speculative_child := 0.U
next_uop.iw_p2_speculative_child := 0.U
val rebusied_prs1 = WireInit(false.B)
val rebusied_prs2 = WireInit(false.B)
val rebusied = rebusied_prs1 || rebusied_prs2
val prs1_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs1 }
val prs2_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs2 }
val prs3_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs3 }
val prs1_wakeups = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.valid && m }
val prs2_wakeups = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.valid && m }
val prs3_wakeups = (io.wakeup_ports zip prs3_matches).map { case (w,m) => w.valid && m }
val prs1_rebusys = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.bits.rebusy && m }
val prs2_rebusys = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.bits.rebusy && m }
val bypassables = io.wakeup_ports.map { w => w.bits.bypassable }
val speculative_masks = io.wakeup_ports.map { w => w.bits.speculative_mask }
when (prs1_wakeups.reduce(_||_)) {
next_uop.prs1_busy := false.B
next_uop.iw_p1_speculative_child := Mux1H(prs1_wakeups, speculative_masks)
next_uop.iw_p1_bypass_hint := Mux1H(prs1_wakeups, bypassables)
}
when ((prs1_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p1_speculative_child) =/= 0.U)) &&
slot_uop.lrs1_rtype === RT_FIX) {
next_uop.prs1_busy := true.B
rebusied_prs1 := true.B
}
when (prs2_wakeups.reduce(_||_)) {
next_uop.prs2_busy := false.B
next_uop.iw_p2_speculative_child := Mux1H(prs2_wakeups, speculative_masks)
next_uop.iw_p2_bypass_hint := Mux1H(prs2_wakeups, bypassables)
}
when ((prs2_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p2_speculative_child) =/= 0.U)) &&
slot_uop.lrs2_rtype === RT_FIX) {
next_uop.prs2_busy := true.B
rebusied_prs2 := true.B
}
when (prs3_wakeups.reduce(_||_)) {
next_uop.prs3_busy := false.B
next_uop.iw_p3_bypass_hint := Mux1H(prs3_wakeups, bypassables)
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === slot_uop.ppred) {
next_uop.ppred_busy := false.B
}
val iss_ready = !slot_uop.prs1_busy && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && !(slot_uop.prs3_busy && isFp.B)
val agen_ready = (slot_uop.fu_code(FC_AGEN) && !slot_uop.prs1_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
val dgen_ready = (slot_uop.fu_code(FC_DGEN) && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
io.request := slot_valid && !slot_uop.iw_issued && (
iss_ready || agen_ready || dgen_ready
)
io.iss_uop := slot_uop
// Update state for current micro-op based on grant
next_uop.iw_issued := false.B
next_uop.iw_issued_partial_agen := false.B
next_uop.iw_issued_partial_dgen := false.B
when (io.grant && !io.squash_grant) {
next_uop.iw_issued := true.B
}
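  // Memory slots may hold a fused AGEN+DGEN uop: issue the AGEN first when its operand is ready, otherwise the DGEN.
  // A DGEN issues its data operand (prs2) through the prs1 read port.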
if (isMem) {
when (slot_uop.fu_code(FC_AGEN) && slot_uop.fu_code(FC_DGEN)) {
when (agen_ready) {
// Issue the AGEN, next slot entry is a DGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_agen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := true.B
io.iss_uop.fu_code(FC_DGEN) := false.B
} .otherwise {
// Issue the DGEN, next slot entry is the AGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_dgen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := false.B
io.iss_uop.fu_code(FC_DGEN) := true.B
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
} .elsewhen (slot_uop.fu_code(FC_DGEN)) {
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
io.iss_uop.lrs2_rtype := RT_X
io.iss_uop.prs2 := io.iss_uop.prs1 // helps with DCE
}
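  // After issue the slot empties, unless an operand was re-busied or only half of an AGEN/DGEN pair has gone out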
when (slot_valid && slot_uop.iw_issued) {
next_valid := rebusied
if (isMem) {
when (slot_uop.iw_issued_partial_agen) {
next_valid := true.B
when (!rebusied_prs1) {
next_uop.fu_code(FC_AGEN) := false.B
next_uop.fu_code(FC_DGEN) := true.B
}
} .elsewhen (slot_uop.iw_issued_partial_dgen) {
next_valid := true.B
when (!rebusied_prs2) {
next_uop.fu_code(FC_AGEN) := true.B
next_uop.fu_code(FC_DGEN) := false.B
}
}
}
}
}
| module IssueSlot_1( // @[issue-slot.scala:49:7]
input clock, // @[issue-slot.scala:49:7]
input reset, // @[issue-slot.scala:49:7]
output io_valid, // @[issue-slot.scala:52:14]
output io_will_be_valid, // @[issue-slot.scala:52:14]
output io_request, // @[issue-slot.scala:52:14]
input io_grant, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_iss_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_issued, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [11:0] io_iss_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_type, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_amo, // @[issue-slot.scala:52:14]
output io_iss_uop_is_eret, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_iss_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_iss_uop_taken, // @[issue-slot.scala:52:14]
output io_iss_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_iss_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_rob_idx, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ppred, // @[issue-slot.scala:52:14]
output io_iss_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_iss_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_iss_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_mem_size, // @[issue-slot.scala:52:14]
output io_iss_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_iss_uop_is_unique, // @[issue-slot.scala:52:14]
output io_iss_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_iss_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_iss_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_iss_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_in_uop_valid, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_4, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_5, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_6, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_7, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_8, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_9, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_issued, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_type, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfb, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fencei, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_amo, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_eret, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rocc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:52:14]
input io_in_uop_bits_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:52:14]
input io_in_uop_bits_taken, // @[issue-slot.scala:52:14]
input io_in_uop_bits_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_op2_sel, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_exception, // @[issue-slot.scala:52:14]
input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:52:14]
input io_in_uop_bits_mem_signed, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_stq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_unique, // @[issue-slot.scala:52:14]
input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_csr_cmd, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_in_uop_bits_frs3_en, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_fcn_op, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_typ, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_out_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_out_uop_iw_issued, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [11:0] io_out_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_type, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_out_uop_is_fence, // @[issue-slot.scala:52:14]
output io_out_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_out_uop_is_amo, // @[issue-slot.scala:52:14]
output io_out_uop_is_eret, // @[issue-slot.scala:52:14]
output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_out_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_out_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_out_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_out_uop_taken, // @[issue-slot.scala:52:14]
output io_out_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_rob_idx, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ppred, // @[issue-slot.scala:52:14]
output io_out_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_out_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_out_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:52:14]
output io_out_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_out_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_out_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_out_uop_is_unique, // @[issue-slot.scala:52:14]
output io_out_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_out_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_out_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_out_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input [11:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:52:14]
input [11:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_type, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_eret, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_taken, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_mispredict, // @[issue-slot.scala:52:14]
input io_brupdate_b2_taken, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:52:14]
input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:52:14]
input io_kill, // @[issue-slot.scala:52:14]
input io_clear, // @[issue-slot.scala:52:14]
input io_squash_grant, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_0_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_wakeup_ports_0_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_0_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_0_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_1_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_wakeup_ports_1_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_1_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_1_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc // @[issue-slot.scala:52:14]
);
wire [11:0] next_uop_out_br_mask; // @[util.scala:104:23]
wire io_grant_0 = io_grant; // @[issue-slot.scala:49:7]
wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_0_0 = io_in_uop_bits_iq_type_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_1_0 = io_in_uop_bits_iq_type_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_2_0 = io_in_uop_bits_iq_type_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_3_0 = io_in_uop_bits_iq_type_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_0_0 = io_in_uop_bits_fu_code_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_1_0 = io_in_uop_bits_fu_code_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_2_0 = io_in_uop_bits_fu_code_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_3_0 = io_in_uop_bits_fu_code_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_4_0 = io_in_uop_bits_fu_code_4; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_5_0 = io_in_uop_bits_fu_code_5; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_6_0 = io_in_uop_bits_fu_code_6; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_7_0 = io_in_uop_bits_fu_code_7; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_8_0 = io_in_uop_bits_fu_code_8; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_9_0 = io_in_uop_bits_fu_code_9; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_0 = io_in_uop_bits_iw_issued; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p1_bypass_hint_0 = io_in_uop_bits_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p2_bypass_hint_0 = io_in_uop_bits_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p3_bypass_hint_0 = io_in_uop_bits_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_dis_col_sel_0 = io_in_uop_bits_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_type_0 = io_in_uop_bits_br_type; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfence_0 = io_in_uop_bits_is_sfence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_eret_0 = io_in_uop_bits_is_eret; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rocc_0 = io_in_uop_bits_is_rocc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_mov_0 = io_in_uop_bits_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_imm_rename_0 = io_in_uop_bits_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_imm_sel_0 = io_in_uop_bits_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_pimm_0 = io_in_uop_bits_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_op1_sel_0 = io_in_uop_bits_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_op2_sel_0 = io_in_uop_bits_op2_sel; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ldst_0 = io_in_uop_bits_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wen_0 = io_in_uop_bits_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren1_0 = io_in_uop_bits_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren2_0 = io_in_uop_bits_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren3_0 = io_in_uop_bits_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap12_0 = io_in_uop_bits_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap23_0 = io_in_uop_bits_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagIn_0 = io_in_uop_bits_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagOut_0 = io_in_uop_bits_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fromint_0 = io_in_uop_bits_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_toint_0 = io_in_uop_bits_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fastpipe_0 = io_in_uop_bits_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fma_0 = io_in_uop_bits_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_div_0 = io_in_uop_bits_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_sqrt_0 = io_in_uop_bits_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wflags_0 = io_in_uop_bits_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_vec_0 = io_in_uop_bits_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_csr_cmd_0 = io_in_uop_bits_csr_cmd; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fcn_dw_0 = io_in_uop_bits_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_fcn_op_0 = io_in_uop_bits_fcn_op; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_fp_rm_0 = io_in_uop_bits_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_typ_0 = io_in_uop_bits_fp_typ; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:49:7]
wire [11:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:49:7]
wire [11:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_0_0 = io_brupdate_b2_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_1_0 = io_brupdate_b2_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_2_0 = io_brupdate_b2_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_3_0 = io_brupdate_b2_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_0_0 = io_brupdate_b2_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_1_0 = io_brupdate_b2_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_2_0 = io_brupdate_b2_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_3_0 = io_brupdate_b2_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_4_0 = io_brupdate_b2_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_5_0 = io_brupdate_b2_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_6_0 = io_brupdate_b2_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_7_0 = io_brupdate_b2_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_8_0 = io_brupdate_b2_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_9_0 = io_brupdate_b2_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_0 = io_brupdate_b2_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen_0 = io_brupdate_b2_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen_0 = io_brupdate_b2_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_iw_p1_speculative_child_0 = io_brupdate_b2_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_iw_p2_speculative_child_0 = io_brupdate_b2_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint_0 = io_brupdate_b2_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint_0 = io_brupdate_b2_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint_0 = io_brupdate_b2_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_dis_col_sel_0 = io_brupdate_b2_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_type_0 = io_brupdate_b2_uop_br_type; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfence_0 = io_brupdate_b2_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_eret_0 = io_brupdate_b2_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rocc_0 = io_brupdate_b2_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_mov_0 = io_brupdate_b2_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_imm_rename_0 = io_brupdate_b2_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_imm_sel_0 = io_brupdate_b2_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_pimm_0 = io_brupdate_b2_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_op1_sel_0 = io_brupdate_b2_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_op2_sel_0 = io_brupdate_b2_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst_0 = io_brupdate_b2_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wen_0 = io_brupdate_b2_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1_0 = io_brupdate_b2_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2_0 = io_brupdate_b2_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3_0 = io_brupdate_b2_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12_0 = io_brupdate_b2_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23_0 = io_brupdate_b2_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn_0 = io_brupdate_b2_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut_0 = io_brupdate_b2_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint_0 = io_brupdate_b2_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_toint_0 = io_brupdate_b2_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe_0 = io_brupdate_b2_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fma_0 = io_brupdate_b2_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_div_0 = io_brupdate_b2_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt_0 = io_brupdate_b2_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags_0 = io_brupdate_b2_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_vec_0 = io_brupdate_b2_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd_0 = io_brupdate_b2_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fcn_dw_0 = io_brupdate_b2_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_fcn_op_0 = io_brupdate_b2_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_fp_rm_0 = io_brupdate_b2_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_typ_0 = io_brupdate_b2_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:49:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:49:7]
wire io_kill_0 = io_kill; // @[issue-slot.scala:49:7]
wire io_clear_0 = io_clear; // @[issue-slot.scala:49:7]
wire io_squash_grant_0 = io_squash_grant; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_inst_0 = io_wakeup_ports_0_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_debug_inst_0 = io_wakeup_ports_0_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rvc_0 = io_wakeup_ports_0_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_0_bits_uop_debug_pc_0 = io_wakeup_ports_0_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_0_0 = io_wakeup_ports_0_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_1_0 = io_wakeup_ports_0_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_2_0 = io_wakeup_ports_0_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_3_0 = io_wakeup_ports_0_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_0_0 = io_wakeup_ports_0_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_1_0 = io_wakeup_ports_0_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_2_0 = io_wakeup_ports_0_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_3_0 = io_wakeup_ports_0_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_4_0 = io_wakeup_ports_0_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_5_0 = io_wakeup_ports_0_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_6_0 = io_wakeup_ports_0_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_7_0 = io_wakeup_ports_0_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_8_0 = io_wakeup_ports_0_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_9_0 = io_wakeup_ports_0_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_0 = io_wakeup_ports_0_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_dis_col_sel_0 = io_wakeup_ports_0_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_wakeup_ports_0_bits_uop_br_mask_0 = io_wakeup_ports_0_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_tag_0 = io_wakeup_ports_0_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_type_0 = io_wakeup_ports_0_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfb_0 = io_wakeup_ports_0_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fence_0 = io_wakeup_ports_0_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fencei_0 = io_wakeup_ports_0_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfence_0 = io_wakeup_ports_0_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_amo_0 = io_wakeup_ports_0_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_eret_0 = io_wakeup_ports_0_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_0_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rocc_0 = io_wakeup_ports_0_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_mov_0 = io_wakeup_ports_0_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ftq_idx_0 = io_wakeup_ports_0_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_edge_inst_0 = io_wakeup_ports_0_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_pc_lob_0 = io_wakeup_ports_0_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_taken_0 = io_wakeup_ports_0_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_imm_rename_0 = io_wakeup_ports_0_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_imm_sel_0 = io_wakeup_ports_0_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_pimm_0 = io_wakeup_ports_0_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_0_bits_uop_imm_packed_0 = io_wakeup_ports_0_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_op1_sel_0 = io_wakeup_ports_0_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_op2_sel_0 = io_wakeup_ports_0_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_rob_idx_0 = io_wakeup_ports_0_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_ldq_idx_0 = io_wakeup_ports_0_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_stq_idx_0 = io_wakeup_ports_0_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_rxq_idx_0 = io_wakeup_ports_0_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_pdst_0 = io_wakeup_ports_0_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs1_0 = io_wakeup_ports_0_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs2_0 = io_wakeup_ports_0_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs3_0 = io_wakeup_ports_0_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ppred_0 = io_wakeup_ports_0_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs1_busy_0 = io_wakeup_ports_0_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs2_busy_0 = io_wakeup_ports_0_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs3_busy_0 = io_wakeup_ports_0_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ppred_busy_0 = io_wakeup_ports_0_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_stale_pdst_0 = io_wakeup_ports_0_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_exception_0 = io_wakeup_ports_0_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_0_bits_uop_exc_cause_0 = io_wakeup_ports_0_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_mem_cmd_0 = io_wakeup_ports_0_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_mem_size_0 = io_wakeup_ports_0_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_mem_signed_0 = io_wakeup_ports_0_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_ldq_0 = io_wakeup_ports_0_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_stq_0 = io_wakeup_ports_0_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_unique_0 = io_wakeup_ports_0_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_flush_on_commit_0 = io_wakeup_ports_0_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_csr_cmd_0 = io_wakeup_ports_0_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_0_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_ldst_0 = io_wakeup_ports_0_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs1_0 = io_wakeup_ports_0_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs2_0 = io_wakeup_ports_0_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs3_0 = io_wakeup_ports_0_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_dst_rtype_0 = io_wakeup_ports_0_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype_0 = io_wakeup_ports_0_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype_0 = io_wakeup_ports_0_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_frs3_en_0 = io_wakeup_ports_0_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fcn_dw_0 = io_wakeup_ports_0_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_fcn_op_0 = io_wakeup_ports_0_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_val_0 = io_wakeup_ports_0_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_fp_rm_0 = io_wakeup_ports_0_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_typ_0 = io_wakeup_ports_0_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_0_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_debug_if_0 = io_wakeup_ports_0_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_0_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc_0 = io_wakeup_ports_0_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc_0 = io_wakeup_ports_0_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
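  // Input aliases for wakeup port 1 follow, mirroring the wakeup port 0 aliases above.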
wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_inst_0 = io_wakeup_ports_1_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_debug_inst_0 = io_wakeup_ports_1_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rvc_0 = io_wakeup_ports_1_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_1_bits_uop_debug_pc_0 = io_wakeup_ports_1_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_0_0 = io_wakeup_ports_1_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_1_0 = io_wakeup_ports_1_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_2_0 = io_wakeup_ports_1_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_3_0 = io_wakeup_ports_1_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_0_0 = io_wakeup_ports_1_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_1_0 = io_wakeup_ports_1_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_2_0 = io_wakeup_ports_1_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_3_0 = io_wakeup_ports_1_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_4_0 = io_wakeup_ports_1_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_5_0 = io_wakeup_ports_1_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_6_0 = io_wakeup_ports_1_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_7_0 = io_wakeup_ports_1_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_8_0 = io_wakeup_ports_1_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_9_0 = io_wakeup_ports_1_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_0 = io_wakeup_ports_1_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_dis_col_sel_0 = io_wakeup_ports_1_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_wakeup_ports_1_bits_uop_br_mask_0 = io_wakeup_ports_1_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_tag_0 = io_wakeup_ports_1_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_type_0 = io_wakeup_ports_1_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfb_0 = io_wakeup_ports_1_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fence_0 = io_wakeup_ports_1_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fencei_0 = io_wakeup_ports_1_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfence_0 = io_wakeup_ports_1_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_amo_0 = io_wakeup_ports_1_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_eret_0 = io_wakeup_ports_1_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_1_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rocc_0 = io_wakeup_ports_1_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_mov_0 = io_wakeup_ports_1_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ftq_idx_0 = io_wakeup_ports_1_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_edge_inst_0 = io_wakeup_ports_1_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_pc_lob_0 = io_wakeup_ports_1_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_taken_0 = io_wakeup_ports_1_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_imm_rename_0 = io_wakeup_ports_1_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_imm_sel_0 = io_wakeup_ports_1_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_pimm_0 = io_wakeup_ports_1_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_1_bits_uop_imm_packed_0 = io_wakeup_ports_1_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_op1_sel_0 = io_wakeup_ports_1_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_op2_sel_0 = io_wakeup_ports_1_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_rob_idx_0 = io_wakeup_ports_1_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_ldq_idx_0 = io_wakeup_ports_1_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_stq_idx_0 = io_wakeup_ports_1_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_rxq_idx_0 = io_wakeup_ports_1_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_pdst_0 = io_wakeup_ports_1_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs1_0 = io_wakeup_ports_1_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs2_0 = io_wakeup_ports_1_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs3_0 = io_wakeup_ports_1_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ppred_0 = io_wakeup_ports_1_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs1_busy_0 = io_wakeup_ports_1_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs2_busy_0 = io_wakeup_ports_1_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs3_busy_0 = io_wakeup_ports_1_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ppred_busy_0 = io_wakeup_ports_1_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_stale_pdst_0 = io_wakeup_ports_1_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_exception_0 = io_wakeup_ports_1_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_1_bits_uop_exc_cause_0 = io_wakeup_ports_1_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_mem_cmd_0 = io_wakeup_ports_1_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_mem_size_0 = io_wakeup_ports_1_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_mem_signed_0 = io_wakeup_ports_1_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_ldq_0 = io_wakeup_ports_1_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_stq_0 = io_wakeup_ports_1_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_unique_0 = io_wakeup_ports_1_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_flush_on_commit_0 = io_wakeup_ports_1_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_csr_cmd_0 = io_wakeup_ports_1_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_1_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_ldst_0 = io_wakeup_ports_1_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs1_0 = io_wakeup_ports_1_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs2_0 = io_wakeup_ports_1_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs3_0 = io_wakeup_ports_1_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_dst_rtype_0 = io_wakeup_ports_1_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype_0 = io_wakeup_ports_1_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype_0 = io_wakeup_ports_1_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_frs3_en_0 = io_wakeup_ports_1_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fcn_dw_0 = io_wakeup_ports_1_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_fcn_op_0 = io_wakeup_ports_1_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_val_0 = io_wakeup_ports_1_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_fp_rm_0 = io_wakeup_ports_1_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_typ_0 = io_wakeup_ports_1_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_1_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_debug_if_0 = io_wakeup_ports_1_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_1_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc_0 = io_wakeup_ports_1_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc_0 = io_wakeup_ports_1_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
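  // The signals below are tied off to constants in this slot configuration (partial agen/dgen issue, speculative-child masks, rebusy, and predicate wakeup are unused here).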
wire io_iss_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_bypassable = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:49:7]
wire next_uop_out_iw_issued_partial_agen = 1'h0; // @[util.scala:104:23]
wire next_uop_out_iw_issued_partial_dgen = 1'h0; // @[util.scala:104:23]
wire next_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:59:28]
wire rebusied_prs1 = 1'h0; // @[issue-slot.scala:92:31]
wire rebusied_prs2 = 1'h0; // @[issue-slot.scala:93:31]
wire rebusied = 1'h0; // @[issue-slot.scala:94:32]
wire prs1_rebusys_0 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_1 = 1'h0; // @[issue-slot.scala:102:91]
wire prs2_rebusys_0 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_1 = 1'h0; // @[issue-slot.scala:103:91]
wire _next_uop_iw_p1_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p2_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p3_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire agen_ready = 1'h0; // @[issue-slot.scala:137:114]
wire dgen_ready = 1'h0; // @[issue-slot.scala:138:114]
wire [1:0] io_in_uop_bits_iw_p1_speculative_child = 2'h0; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_iw_p2_speculative_child = 2'h0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_iw_p1_speculative_child = 2'h0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_iw_p2_speculative_child = 2'h0; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_speculative_mask = 2'h0; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_speculative_mask = 2'h0; // @[issue-slot.scala:49:7]
wire [1:0] io_child_rebusys = 2'h0; // @[issue-slot.scala:49:7]
wire [1:0] next_uop_iw_p1_speculative_child = 2'h0; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_iw_p2_speculative_child = 2'h0; // @[issue-slot.scala:59:28]
wire [1:0] _next_uop_iw_p1_speculative_child_T = 2'h0; // @[Mux.scala:30:73]
wire [1:0] _next_uop_iw_p1_speculative_child_T_1 = 2'h0; // @[Mux.scala:30:73]
wire [1:0] _next_uop_iw_p1_speculative_child_T_2 = 2'h0; // @[Mux.scala:30:73]
wire [1:0] _next_uop_iw_p1_speculative_child_WIRE = 2'h0; // @[Mux.scala:30:73]
wire [1:0] _next_uop_iw_p2_speculative_child_T = 2'h0; // @[Mux.scala:30:73]
wire [1:0] _next_uop_iw_p2_speculative_child_T_1 = 2'h0; // @[Mux.scala:30:73]
wire [1:0] _next_uop_iw_p2_speculative_child_T_2 = 2'h0; // @[Mux.scala:30:73]
wire [1:0] _next_uop_iw_p2_speculative_child_WIRE = 2'h0; // @[Mux.scala:30:73]
wire io_wakeup_ports_0_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire [4:0] io_pred_wakeup_port_bits = 5'h0; // @[issue-slot.scala:49:7]
wire _io_will_be_valid_T_1; // @[issue-slot.scala:65:34]
wire _io_request_T_4; // @[issue-slot.scala:140:51]
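  // Wires for the combinationally-computed next micro-op state of this slot.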
wire [31:0] next_uop_inst; // @[issue-slot.scala:59:28]
wire [31:0] next_uop_debug_inst; // @[issue-slot.scala:59:28]
wire next_uop_is_rvc; // @[issue-slot.scala:59:28]
wire [39:0] next_uop_debug_pc; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_0; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_1; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_2; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_0; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_1; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_2; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_4; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_5; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_6; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_7; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_8; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_9; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued; // @[issue-slot.scala:59:28]
wire next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_dis_col_sel; // @[issue-slot.scala:59:28]
wire [11:0] next_uop_br_mask; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_tag; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_type; // @[issue-slot.scala:59:28]
wire next_uop_is_sfb; // @[issue-slot.scala:59:28]
wire next_uop_is_fence; // @[issue-slot.scala:59:28]
wire next_uop_is_fencei; // @[issue-slot.scala:59:28]
wire next_uop_is_sfence; // @[issue-slot.scala:59:28]
wire next_uop_is_amo; // @[issue-slot.scala:59:28]
wire next_uop_is_eret; // @[issue-slot.scala:59:28]
wire next_uop_is_sys_pc2epc; // @[issue-slot.scala:59:28]
wire next_uop_is_rocc; // @[issue-slot.scala:59:28]
wire next_uop_is_mov; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ftq_idx; // @[issue-slot.scala:59:28]
wire next_uop_edge_inst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_pc_lob; // @[issue-slot.scala:59:28]
wire next_uop_taken; // @[issue-slot.scala:59:28]
wire next_uop_imm_rename; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_imm_sel; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_pimm; // @[issue-slot.scala:59:28]
wire [19:0] next_uop_imm_packed; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_op1_sel; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_op2_sel; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ldst; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wen; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren1; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren2; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren3; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap12; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap23; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fromint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_toint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fma; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_div; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wflags; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_vec; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_rob_idx; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_ldq_idx; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_stq_idx; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_rxq_idx; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_pdst; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs1; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs2; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs3; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ppred; // @[issue-slot.scala:59:28]
wire next_uop_prs1_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs2_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs3_busy; // @[issue-slot.scala:59:28]
wire next_uop_ppred_busy; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_stale_pdst; // @[issue-slot.scala:59:28]
wire next_uop_exception; // @[issue-slot.scala:59:28]
wire [63:0] next_uop_exc_cause; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_mem_cmd; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_mem_size; // @[issue-slot.scala:59:28]
wire next_uop_mem_signed; // @[issue-slot.scala:59:28]
wire next_uop_uses_ldq; // @[issue-slot.scala:59:28]
wire next_uop_uses_stq; // @[issue-slot.scala:59:28]
wire next_uop_is_unique; // @[issue-slot.scala:59:28]
wire next_uop_flush_on_commit; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_csr_cmd; // @[issue-slot.scala:59:28]
wire next_uop_ldst_is_rs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_ldst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs2; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs3; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_dst_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs1_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs2_rtype; // @[issue-slot.scala:59:28]
wire next_uop_frs3_en; // @[issue-slot.scala:59:28]
wire next_uop_fcn_dw; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_fcn_op; // @[issue-slot.scala:59:28]
wire next_uop_fp_val; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_fp_rm; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_typ; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_pf_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ae_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ma_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_debug_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_xcpt_if; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_fsrc; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_tsrc; // @[issue-slot.scala:59:28]
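  // Wires driving the io_iss_uop (issued micro-op) output port.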
wire io_iss_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_iss_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [11:0] io_iss_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_iss_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_iss_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
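  // Wires driving the io_out_uop output port.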
wire io_out_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [11:0] io_out_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_out_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_out_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_out_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_out_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_out_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
wire io_valid_0; // @[issue-slot.scala:49:7]
wire io_will_be_valid_0; // @[issue-slot.scala:49:7]
wire io_request_0; // @[issue-slot.scala:49:7]
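  // Slot state: a valid bit plus registered copies of every micro-op field; each register
  // drives the matching io_iss_uop_* output and is mirrored into a next_uop_out_* wire below.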
reg slot_valid; // @[issue-slot.scala:55:27]
assign io_valid_0 = slot_valid; // @[issue-slot.scala:49:7, :55:27]
reg [31:0] slot_uop_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_inst = slot_uop_inst; // @[util.scala:104:23]
reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_debug_inst = slot_uop_debug_inst; // @[util.scala:104:23]
reg slot_uop_is_rvc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rvc = slot_uop_is_rvc; // @[util.scala:104:23]
reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:49:7, :56:21]
wire [39:0] next_uop_out_debug_pc = slot_uop_debug_pc; // @[util.scala:104:23]
reg slot_uop_iq_type_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_0_0 = slot_uop_iq_type_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_0 = slot_uop_iq_type_0; // @[util.scala:104:23]
reg slot_uop_iq_type_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_1_0 = slot_uop_iq_type_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_1 = slot_uop_iq_type_1; // @[util.scala:104:23]
reg slot_uop_iq_type_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_2_0 = slot_uop_iq_type_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_2 = slot_uop_iq_type_2; // @[util.scala:104:23]
reg slot_uop_iq_type_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_3_0 = slot_uop_iq_type_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_3 = slot_uop_iq_type_3; // @[util.scala:104:23]
reg slot_uop_fu_code_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_0_0 = slot_uop_fu_code_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_0 = slot_uop_fu_code_0; // @[util.scala:104:23]
reg slot_uop_fu_code_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_1_0 = slot_uop_fu_code_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_1 = slot_uop_fu_code_1; // @[util.scala:104:23]
reg slot_uop_fu_code_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_2_0 = slot_uop_fu_code_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_2 = slot_uop_fu_code_2; // @[util.scala:104:23]
reg slot_uop_fu_code_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_3_0 = slot_uop_fu_code_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_3 = slot_uop_fu_code_3; // @[util.scala:104:23]
reg slot_uop_fu_code_4; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_4_0 = slot_uop_fu_code_4; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_4 = slot_uop_fu_code_4; // @[util.scala:104:23]
reg slot_uop_fu_code_5; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_5_0 = slot_uop_fu_code_5; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_5 = slot_uop_fu_code_5; // @[util.scala:104:23]
reg slot_uop_fu_code_6; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_6_0 = slot_uop_fu_code_6; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_6 = slot_uop_fu_code_6; // @[util.scala:104:23]
reg slot_uop_fu_code_7; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_7_0 = slot_uop_fu_code_7; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_7 = slot_uop_fu_code_7; // @[util.scala:104:23]
reg slot_uop_fu_code_8; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_8_0 = slot_uop_fu_code_8; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_8 = slot_uop_fu_code_8; // @[util.scala:104:23]
reg slot_uop_fu_code_9; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_9_0 = slot_uop_fu_code_9; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_9 = slot_uop_fu_code_9; // @[util.scala:104:23]
reg slot_uop_iw_issued; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_issued_0 = slot_uop_iw_issued; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_issued = slot_uop_iw_issued; // @[util.scala:104:23]
reg [1:0] slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_speculative_child_0 = slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_iw_p1_speculative_child = slot_uop_iw_p1_speculative_child; // @[util.scala:104:23]
reg [1:0] slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_speculative_child_0 = slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_iw_p2_speculative_child = slot_uop_iw_p2_speculative_child; // @[util.scala:104:23]
reg slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_bypass_hint_0 = slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p1_bypass_hint = slot_uop_iw_p1_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_bypass_hint_0 = slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p2_bypass_hint = slot_uop_iw_p2_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p3_bypass_hint_0 = slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p3_bypass_hint = slot_uop_iw_p3_bypass_hint; // @[util.scala:104:23]
reg [1:0] slot_uop_dis_col_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_dis_col_sel_0 = slot_uop_dis_col_sel; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_dis_col_sel = slot_uop_dis_col_sel; // @[util.scala:104:23]
reg [11:0] slot_uop_br_mask; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:49:7, :56:21]
reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_tag = slot_uop_br_tag; // @[util.scala:104:23]
reg [3:0] slot_uop_br_type; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_type_0 = slot_uop_br_type; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_type = slot_uop_br_type; // @[util.scala:104:23]
reg slot_uop_is_sfb; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfb = slot_uop_is_sfb; // @[util.scala:104:23]
reg slot_uop_is_fence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fence = slot_uop_is_fence; // @[util.scala:104:23]
reg slot_uop_is_fencei; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fencei = slot_uop_is_fencei; // @[util.scala:104:23]
reg slot_uop_is_sfence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfence_0 = slot_uop_is_sfence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfence = slot_uop_is_sfence; // @[util.scala:104:23]
reg slot_uop_is_amo; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_amo = slot_uop_is_amo; // @[util.scala:104:23]
reg slot_uop_is_eret; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_eret_0 = slot_uop_is_eret; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_eret = slot_uop_is_eret; // @[util.scala:104:23]
reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sys_pc2epc = slot_uop_is_sys_pc2epc; // @[util.scala:104:23]
reg slot_uop_is_rocc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rocc_0 = slot_uop_is_rocc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rocc = slot_uop_is_rocc; // @[util.scala:104:23]
reg slot_uop_is_mov; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_mov_0 = slot_uop_is_mov; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_mov = slot_uop_is_mov; // @[util.scala:104:23]
reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ftq_idx = slot_uop_ftq_idx; // @[util.scala:104:23]
reg slot_uop_edge_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_edge_inst = slot_uop_edge_inst; // @[util.scala:104:23]
reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:56:21]
assign io_iss_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_pc_lob = slot_uop_pc_lob; // @[util.scala:104:23]
reg slot_uop_taken; // @[issue-slot.scala:56:21]
assign io_iss_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_taken = slot_uop_taken; // @[util.scala:104:23]
reg slot_uop_imm_rename; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_rename_0 = slot_uop_imm_rename; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_imm_rename = slot_uop_imm_rename; // @[util.scala:104:23]
reg [2:0] slot_uop_imm_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_sel_0 = slot_uop_imm_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_imm_sel = slot_uop_imm_sel; // @[util.scala:104:23]
reg [4:0] slot_uop_pimm; // @[issue-slot.scala:56:21]
assign io_iss_uop_pimm_0 = slot_uop_pimm; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_pimm = slot_uop_pimm; // @[util.scala:104:23]
reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:49:7, :56:21]
wire [19:0] next_uop_out_imm_packed = slot_uop_imm_packed; // @[util.scala:104:23]
reg [1:0] slot_uop_op1_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op1_sel_0 = slot_uop_op1_sel; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_op1_sel = slot_uop_op1_sel; // @[util.scala:104:23]
reg [2:0] slot_uop_op2_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op2_sel_0 = slot_uop_op2_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_op2_sel = slot_uop_op2_sel; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ldst_0 = slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ldst = slot_uop_fp_ctrl_ldst; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wen; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wen_0 = slot_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wen = slot_uop_fp_ctrl_wen; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren1_0 = slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren1 = slot_uop_fp_ctrl_ren1; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren2_0 = slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren2 = slot_uop_fp_ctrl_ren2; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren3_0 = slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren3 = slot_uop_fp_ctrl_ren3; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap12_0 = slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap12 = slot_uop_fp_ctrl_swap12; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap23_0 = slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap23 = slot_uop_fp_ctrl_swap23; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagIn_0 = slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagIn = slot_uop_fp_ctrl_typeTagIn; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagOut_0 = slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagOut = slot_uop_fp_ctrl_typeTagOut; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fromint_0 = slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fromint = slot_uop_fp_ctrl_fromint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_toint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_toint_0 = slot_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_toint = slot_uop_fp_ctrl_toint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fastpipe_0 = slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fastpipe = slot_uop_fp_ctrl_fastpipe; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fma; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fma_0 = slot_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fma = slot_uop_fp_ctrl_fma; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_div; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_div_0 = slot_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_div = slot_uop_fp_ctrl_div; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_sqrt_0 = slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_sqrt = slot_uop_fp_ctrl_sqrt; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wflags_0 = slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wflags = slot_uop_fp_ctrl_wflags; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_vec; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_vec_0 = slot_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_vec = slot_uop_fp_ctrl_vec; // @[util.scala:104:23]
reg [5:0] slot_uop_rob_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_rob_idx = slot_uop_rob_idx; // @[util.scala:104:23]
reg [3:0] slot_uop_ldq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_ldq_idx = slot_uop_ldq_idx; // @[util.scala:104:23]
reg [3:0] slot_uop_stq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_stq_idx = slot_uop_stq_idx; // @[util.scala:104:23]
reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_rxq_idx = slot_uop_rxq_idx; // @[util.scala:104:23]
reg [6:0] slot_uop_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_pdst = slot_uop_pdst; // @[util.scala:104:23]
reg [6:0] slot_uop_prs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs1 = slot_uop_prs1; // @[util.scala:104:23]
reg [6:0] slot_uop_prs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs2 = slot_uop_prs2; // @[util.scala:104:23]
reg [6:0] slot_uop_prs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs3 = slot_uop_prs3; // @[util.scala:104:23]
reg [4:0] slot_uop_ppred; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ppred = slot_uop_ppred; // @[util.scala:104:23]
reg slot_uop_prs1_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs1_busy = slot_uop_prs1_busy; // @[util.scala:104:23]
reg slot_uop_prs2_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs2_busy = slot_uop_prs2_busy; // @[util.scala:104:23]
reg slot_uop_prs3_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs3_busy = slot_uop_prs3_busy; // @[util.scala:104:23]
wire _iss_ready_T_6 = slot_uop_prs3_busy; // @[issue-slot.scala:56:21, :136:131]
reg slot_uop_ppred_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ppred_busy = slot_uop_ppred_busy; // @[util.scala:104:23]
wire _iss_ready_T_3 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :136:88]
wire _agen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :137:95]
wire _dgen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :138:95]
reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_stale_pdst = slot_uop_stale_pdst; // @[util.scala:104:23]
reg slot_uop_exception; // @[issue-slot.scala:56:21]
assign io_iss_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_exception = slot_uop_exception; // @[util.scala:104:23]
reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:56:21]
assign io_iss_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:49:7, :56:21]
wire [63:0] next_uop_out_exc_cause = slot_uop_exc_cause; // @[util.scala:104:23]
reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_mem_cmd = slot_uop_mem_cmd; // @[util.scala:104:23]
reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_mem_size = slot_uop_mem_size; // @[util.scala:104:23]
reg slot_uop_mem_signed; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_mem_signed = slot_uop_mem_signed; // @[util.scala:104:23]
reg slot_uop_uses_ldq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_ldq = slot_uop_uses_ldq; // @[util.scala:104:23]
reg slot_uop_uses_stq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_stq = slot_uop_uses_stq; // @[util.scala:104:23]
reg slot_uop_is_unique; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_unique = slot_uop_is_unique; // @[util.scala:104:23]
reg slot_uop_flush_on_commit; // @[issue-slot.scala:56:21]
assign io_iss_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_flush_on_commit = slot_uop_flush_on_commit; // @[util.scala:104:23]
reg [2:0] slot_uop_csr_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_csr_cmd_0 = slot_uop_csr_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_csr_cmd = slot_uop_csr_cmd; // @[util.scala:104:23]
reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ldst_is_rs1 = slot_uop_ldst_is_rs1; // @[util.scala:104:23]
reg [5:0] slot_uop_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_ldst = slot_uop_ldst; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs1 = slot_uop_lrs1; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs2 = slot_uop_lrs2; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs3 = slot_uop_lrs3; // @[util.scala:104:23]
reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_dst_rtype = slot_uop_dst_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_rtype_0 = slot_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_lrs1_rtype = slot_uop_lrs1_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_rtype_0 = slot_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_lrs2_rtype = slot_uop_lrs2_rtype; // @[util.scala:104:23]
reg slot_uop_frs3_en; // @[issue-slot.scala:56:21]
assign io_iss_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_frs3_en = slot_uop_frs3_en; // @[util.scala:104:23]
reg slot_uop_fcn_dw; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_dw_0 = slot_uop_fcn_dw; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fcn_dw = slot_uop_fcn_dw; // @[util.scala:104:23]
reg [4:0] slot_uop_fcn_op; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_op_0 = slot_uop_fcn_op; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_fcn_op = slot_uop_fcn_op; // @[util.scala:104:23]
reg slot_uop_fp_val; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_val = slot_uop_fp_val; // @[util.scala:104:23]
reg [2:0] slot_uop_fp_rm; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_rm_0 = slot_uop_fp_rm; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_fp_rm = slot_uop_fp_rm; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_typ; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_typ_0 = slot_uop_fp_typ; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_typ = slot_uop_fp_typ; // @[util.scala:104:23]
reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_pf_if = slot_uop_xcpt_pf_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ae_if = slot_uop_xcpt_ae_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ma_if = slot_uop_xcpt_ma_if; // @[util.scala:104:23]
reg slot_uop_bp_debug_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_debug_if = slot_uop_bp_debug_if; // @[util.scala:104:23]
reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_xcpt_if = slot_uop_bp_xcpt_if; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_fsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_fsrc = slot_uop_debug_fsrc; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_tsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_tsrc = slot_uop_debug_tsrc; // @[util.scala:104:23]
wire next_valid; // @[issue-slot.scala:58:28]
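  // next_uop_* mirrors the held uop via next_uop_out_*; br_mask is the held mask with
  // newly-resolved branch bits cleared (see _next_uop_out_br_mask_T below).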
assign next_uop_inst = next_uop_out_inst; // @[util.scala:104:23]
assign next_uop_debug_inst = next_uop_out_debug_inst; // @[util.scala:104:23]
assign next_uop_is_rvc = next_uop_out_is_rvc; // @[util.scala:104:23]
assign next_uop_debug_pc = next_uop_out_debug_pc; // @[util.scala:104:23]
assign next_uop_iq_type_0 = next_uop_out_iq_type_0; // @[util.scala:104:23]
assign next_uop_iq_type_1 = next_uop_out_iq_type_1; // @[util.scala:104:23]
assign next_uop_iq_type_2 = next_uop_out_iq_type_2; // @[util.scala:104:23]
assign next_uop_iq_type_3 = next_uop_out_iq_type_3; // @[util.scala:104:23]
assign next_uop_fu_code_0 = next_uop_out_fu_code_0; // @[util.scala:104:23]
assign next_uop_fu_code_1 = next_uop_out_fu_code_1; // @[util.scala:104:23]
assign next_uop_fu_code_2 = next_uop_out_fu_code_2; // @[util.scala:104:23]
assign next_uop_fu_code_3 = next_uop_out_fu_code_3; // @[util.scala:104:23]
assign next_uop_fu_code_4 = next_uop_out_fu_code_4; // @[util.scala:104:23]
assign next_uop_fu_code_5 = next_uop_out_fu_code_5; // @[util.scala:104:23]
assign next_uop_fu_code_6 = next_uop_out_fu_code_6; // @[util.scala:104:23]
assign next_uop_fu_code_7 = next_uop_out_fu_code_7; // @[util.scala:104:23]
assign next_uop_fu_code_8 = next_uop_out_fu_code_8; // @[util.scala:104:23]
assign next_uop_fu_code_9 = next_uop_out_fu_code_9; // @[util.scala:104:23]
wire [11:0] _next_uop_out_br_mask_T_1; // @[util.scala:93:25]
assign next_uop_dis_col_sel = next_uop_out_dis_col_sel; // @[util.scala:104:23]
assign next_uop_br_mask = next_uop_out_br_mask; // @[util.scala:104:23]
assign next_uop_br_tag = next_uop_out_br_tag; // @[util.scala:104:23]
assign next_uop_br_type = next_uop_out_br_type; // @[util.scala:104:23]
assign next_uop_is_sfb = next_uop_out_is_sfb; // @[util.scala:104:23]
assign next_uop_is_fence = next_uop_out_is_fence; // @[util.scala:104:23]
assign next_uop_is_fencei = next_uop_out_is_fencei; // @[util.scala:104:23]
assign next_uop_is_sfence = next_uop_out_is_sfence; // @[util.scala:104:23]
assign next_uop_is_amo = next_uop_out_is_amo; // @[util.scala:104:23]
assign next_uop_is_eret = next_uop_out_is_eret; // @[util.scala:104:23]
assign next_uop_is_sys_pc2epc = next_uop_out_is_sys_pc2epc; // @[util.scala:104:23]
assign next_uop_is_rocc = next_uop_out_is_rocc; // @[util.scala:104:23]
assign next_uop_is_mov = next_uop_out_is_mov; // @[util.scala:104:23]
assign next_uop_ftq_idx = next_uop_out_ftq_idx; // @[util.scala:104:23]
assign next_uop_edge_inst = next_uop_out_edge_inst; // @[util.scala:104:23]
assign next_uop_pc_lob = next_uop_out_pc_lob; // @[util.scala:104:23]
assign next_uop_taken = next_uop_out_taken; // @[util.scala:104:23]
assign next_uop_imm_rename = next_uop_out_imm_rename; // @[util.scala:104:23]
assign next_uop_imm_sel = next_uop_out_imm_sel; // @[util.scala:104:23]
assign next_uop_pimm = next_uop_out_pimm; // @[util.scala:104:23]
assign next_uop_imm_packed = next_uop_out_imm_packed; // @[util.scala:104:23]
assign next_uop_op1_sel = next_uop_out_op1_sel; // @[util.scala:104:23]
assign next_uop_op2_sel = next_uop_out_op2_sel; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ldst = next_uop_out_fp_ctrl_ldst; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wen = next_uop_out_fp_ctrl_wen; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren1 = next_uop_out_fp_ctrl_ren1; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren2 = next_uop_out_fp_ctrl_ren2; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren3 = next_uop_out_fp_ctrl_ren3; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap12 = next_uop_out_fp_ctrl_swap12; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap23 = next_uop_out_fp_ctrl_swap23; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagIn = next_uop_out_fp_ctrl_typeTagIn; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagOut = next_uop_out_fp_ctrl_typeTagOut; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fromint = next_uop_out_fp_ctrl_fromint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_toint = next_uop_out_fp_ctrl_toint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fastpipe = next_uop_out_fp_ctrl_fastpipe; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fma = next_uop_out_fp_ctrl_fma; // @[util.scala:104:23]
assign next_uop_fp_ctrl_div = next_uop_out_fp_ctrl_div; // @[util.scala:104:23]
assign next_uop_fp_ctrl_sqrt = next_uop_out_fp_ctrl_sqrt; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wflags = next_uop_out_fp_ctrl_wflags; // @[util.scala:104:23]
assign next_uop_fp_ctrl_vec = next_uop_out_fp_ctrl_vec; // @[util.scala:104:23]
assign next_uop_rob_idx = next_uop_out_rob_idx; // @[util.scala:104:23]
assign next_uop_ldq_idx = next_uop_out_ldq_idx; // @[util.scala:104:23]
assign next_uop_stq_idx = next_uop_out_stq_idx; // @[util.scala:104:23]
assign next_uop_rxq_idx = next_uop_out_rxq_idx; // @[util.scala:104:23]
assign next_uop_pdst = next_uop_out_pdst; // @[util.scala:104:23]
assign next_uop_prs1 = next_uop_out_prs1; // @[util.scala:104:23]
assign next_uop_prs2 = next_uop_out_prs2; // @[util.scala:104:23]
assign next_uop_prs3 = next_uop_out_prs3; // @[util.scala:104:23]
assign next_uop_ppred = next_uop_out_ppred; // @[util.scala:104:23]
assign next_uop_ppred_busy = next_uop_out_ppred_busy; // @[util.scala:104:23]
assign next_uop_stale_pdst = next_uop_out_stale_pdst; // @[util.scala:104:23]
assign next_uop_exception = next_uop_out_exception; // @[util.scala:104:23]
assign next_uop_exc_cause = next_uop_out_exc_cause; // @[util.scala:104:23]
assign next_uop_mem_cmd = next_uop_out_mem_cmd; // @[util.scala:104:23]
assign next_uop_mem_size = next_uop_out_mem_size; // @[util.scala:104:23]
assign next_uop_mem_signed = next_uop_out_mem_signed; // @[util.scala:104:23]
assign next_uop_uses_ldq = next_uop_out_uses_ldq; // @[util.scala:104:23]
assign next_uop_uses_stq = next_uop_out_uses_stq; // @[util.scala:104:23]
assign next_uop_is_unique = next_uop_out_is_unique; // @[util.scala:104:23]
assign next_uop_flush_on_commit = next_uop_out_flush_on_commit; // @[util.scala:104:23]
assign next_uop_csr_cmd = next_uop_out_csr_cmd; // @[util.scala:104:23]
assign next_uop_ldst_is_rs1 = next_uop_out_ldst_is_rs1; // @[util.scala:104:23]
assign next_uop_ldst = next_uop_out_ldst; // @[util.scala:104:23]
assign next_uop_lrs1 = next_uop_out_lrs1; // @[util.scala:104:23]
assign next_uop_lrs2 = next_uop_out_lrs2; // @[util.scala:104:23]
assign next_uop_lrs3 = next_uop_out_lrs3; // @[util.scala:104:23]
assign next_uop_dst_rtype = next_uop_out_dst_rtype; // @[util.scala:104:23]
assign next_uop_lrs1_rtype = next_uop_out_lrs1_rtype; // @[util.scala:104:23]
assign next_uop_lrs2_rtype = next_uop_out_lrs2_rtype; // @[util.scala:104:23]
assign next_uop_frs3_en = next_uop_out_frs3_en; // @[util.scala:104:23]
assign next_uop_fcn_dw = next_uop_out_fcn_dw; // @[util.scala:104:23]
assign next_uop_fcn_op = next_uop_out_fcn_op; // @[util.scala:104:23]
assign next_uop_fp_val = next_uop_out_fp_val; // @[util.scala:104:23]
assign next_uop_fp_rm = next_uop_out_fp_rm; // @[util.scala:104:23]
assign next_uop_fp_typ = next_uop_out_fp_typ; // @[util.scala:104:23]
assign next_uop_xcpt_pf_if = next_uop_out_xcpt_pf_if; // @[util.scala:104:23]
assign next_uop_xcpt_ae_if = next_uop_out_xcpt_ae_if; // @[util.scala:104:23]
assign next_uop_xcpt_ma_if = next_uop_out_xcpt_ma_if; // @[util.scala:104:23]
assign next_uop_bp_debug_if = next_uop_out_bp_debug_if; // @[util.scala:104:23]
assign next_uop_bp_xcpt_if = next_uop_out_bp_xcpt_if; // @[util.scala:104:23]
assign next_uop_debug_fsrc = next_uop_out_debug_fsrc; // @[util.scala:104:23]
assign next_uop_debug_tsrc = next_uop_out_debug_tsrc; // @[util.scala:104:23]
wire [11:0] _next_uop_out_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:93:27]
assign _next_uop_out_br_mask_T_1 = slot_uop_br_mask & _next_uop_out_br_mask_T; // @[util.scala:93:{25,27}]
assign next_uop_out_br_mask = _next_uop_out_br_mask_T_1; // @[util.scala:93:25, :104:23]
assign io_out_uop_inst_0 = next_uop_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_inst_0 = next_uop_debug_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rvc_0 = next_uop_is_rvc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_pc_0 = next_uop_debug_pc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_0_0 = next_uop_iq_type_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_1_0 = next_uop_iq_type_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_2_0 = next_uop_iq_type_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_3_0 = next_uop_iq_type_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_0_0 = next_uop_fu_code_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_1_0 = next_uop_fu_code_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_2_0 = next_uop_fu_code_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_3_0 = next_uop_fu_code_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_4_0 = next_uop_fu_code_4; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_5_0 = next_uop_fu_code_5; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_6_0 = next_uop_fu_code_6; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_7_0 = next_uop_fu_code_7; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_8_0 = next_uop_fu_code_8; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_9_0 = next_uop_fu_code_9; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_issued_0 = next_uop_iw_issued; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_bypass_hint_0 = next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_bypass_hint_0 = next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p3_bypass_hint_0 = next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dis_col_sel_0 = next_uop_dis_col_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_mask_0 = next_uop_br_mask; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_tag_0 = next_uop_br_tag; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_type_0 = next_uop_br_type; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfb_0 = next_uop_is_sfb; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fence_0 = next_uop_is_fence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fencei_0 = next_uop_is_fencei; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfence_0 = next_uop_is_sfence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_amo_0 = next_uop_is_amo; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_eret_0 = next_uop_is_eret; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sys_pc2epc_0 = next_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rocc_0 = next_uop_is_rocc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_mov_0 = next_uop_is_mov; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ftq_idx_0 = next_uop_ftq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_edge_inst_0 = next_uop_edge_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pc_lob_0 = next_uop_pc_lob; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_taken_0 = next_uop_taken; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_rename_0 = next_uop_imm_rename; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_sel_0 = next_uop_imm_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pimm_0 = next_uop_pimm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_packed_0 = next_uop_imm_packed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op1_sel_0 = next_uop_op1_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op2_sel_0 = next_uop_op2_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ldst_0 = next_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wen_0 = next_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren1_0 = next_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren2_0 = next_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren3_0 = next_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap12_0 = next_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap23_0 = next_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagIn_0 = next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagOut_0 = next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fromint_0 = next_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_toint_0 = next_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fastpipe_0 = next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fma_0 = next_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_div_0 = next_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_sqrt_0 = next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wflags_0 = next_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_vec_0 = next_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rob_idx_0 = next_uop_rob_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldq_idx_0 = next_uop_ldq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stq_idx_0 = next_uop_stq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rxq_idx_0 = next_uop_rxq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pdst_0 = next_uop_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_0 = next_uop_prs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_0 = next_uop_prs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_0 = next_uop_prs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_0 = next_uop_ppred; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_busy_0 = next_uop_prs1_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_busy_0 = next_uop_prs2_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_busy_0 = next_uop_prs3_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_busy_0 = next_uop_ppred_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stale_pdst_0 = next_uop_stale_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exception_0 = next_uop_exception; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exc_cause_0 = next_uop_exc_cause; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_cmd_0 = next_uop_mem_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_size_0 = next_uop_mem_size; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_signed_0 = next_uop_mem_signed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_ldq_0 = next_uop_uses_ldq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_stq_0 = next_uop_uses_stq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_unique_0 = next_uop_is_unique; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_flush_on_commit_0 = next_uop_flush_on_commit; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_csr_cmd_0 = next_uop_csr_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_is_rs1_0 = next_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_0 = next_uop_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_0 = next_uop_lrs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_0 = next_uop_lrs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs3_0 = next_uop_lrs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dst_rtype_0 = next_uop_dst_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_rtype_0 = next_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_rtype_0 = next_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_frs3_en_0 = next_uop_frs3_en; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_dw_0 = next_uop_fcn_dw; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_op_0 = next_uop_fcn_op; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_val_0 = next_uop_fp_val; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_rm_0 = next_uop_fp_rm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_typ_0 = next_uop_fp_typ; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_pf_if_0 = next_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ae_if_0 = next_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ma_if_0 = next_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_debug_if_0 = next_uop_bp_debug_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_xcpt_if_0 = next_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_fsrc_0 = next_uop_debug_fsrc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_tsrc_0 = next_uop_debug_tsrc; // @[issue-slot.scala:49:7, :59:28]
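  // Kill detection: squash the slot when a branch in slot_uop_br_mask is flagged as
  // mispredicted by the branch-update interface, or when io_kill is asserted.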
wire [11:0] _killed_T = io_brupdate_b1_mispredict_mask_0 & slot_uop_br_mask; // @[util.scala:126:51]
wire _killed_T_1 = |_killed_T; // @[util.scala:126:{51,59}]
wire killed = _killed_T_1 | io_kill_0; // @[util.scala:61:61, :126:59]
wire _io_will_be_valid_T = ~killed; // @[util.scala:61:61]
assign _io_will_be_valid_T_1 = next_valid & _io_will_be_valid_T; // @[issue-slot.scala:58:28, :65:{34,37}]
assign io_will_be_valid_0 = _io_will_be_valid_T_1; // @[issue-slot.scala:49:7, :65:34]
wire _slot_valid_T = ~killed; // @[util.scala:61:61]
wire _slot_valid_T_1 = next_valid & _slot_valid_T; // @[issue-slot.scala:58:28, :74:{30,33}] |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
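// Illustrative sketch (not part of this file): an asynchronous clock-domain crossing pairs a
// source node with a sink node, with the sink's AsyncQueueParams (hypothetical values below)
// fixing the queue depth and synchronizer stages:
//   val crossingSource = TLAsyncSourceNode(sync = Some(3))
//   val crossingSink   = TLAsyncSinkNode(AsyncQueueParams(depth = 8, sync = 3))
//   crossingSink := crossingSource  // each side is clocked by its own domain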
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File RegisterRouter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.resources.{Device, Resource, ResourceBindings}
import freechips.rocketchip.prci.{NoCrossing}
import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno}
import scala.math.min
class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle {
val source = UInt((sourceBits max 1).W)
val size = UInt((sizeBits max 1).W)
}
case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra")
case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers.
 * It provides functionality for describing and outputting metadata about the registers in several formats.
* It also provides a concrete implementation of a regmap function that will be used
* to wire a map of internal registers associated with this node to the node's interconnect port.
*/
case class TLRegisterNode(
address: Seq[AddressSet],
device: Device,
deviceKey: String = "reg/control",
concurrency: Int = 0,
beatBytes: Int = 4,
undefZero: Boolean = true,
executable: Boolean = false)(
implicit valName: ValName)
extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = Seq(Resource(device, deviceKey)),
executable = executable,
supportsGet = TransferSizes(1, beatBytes),
supportsPutPartial = TransferSizes(1, beatBytes),
supportsPutFull = TransferSizes(1, beatBytes),
fifoId = Some(0))), // requests are handled in order
beatBytes = beatBytes,
minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle
{
val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min)
require (size >= beatBytes)
address.foreach { case a =>
require (a.widen(size-1).base == address.head.widen(size-1).base,
s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}")
}
// Calling this method causes the matching TL2 bundle to be
// configured to route all requests to the listed RegFields.
def regmap(mapping: RegField.Map*) = {
val (bundleIn, edge) = this.in(0)
val a = bundleIn.a
val d = bundleIn.d
val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields
val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields)
val in = Wire(Decoupled(new RegMapperInput(params)))
in.bits.read := a.bits.opcode === TLMessages.Get
in.bits.index := edge.addr_hi(a.bits)
in.bits.data := a.bits.data
in.bits.mask := a.bits.mask
Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = in.bits.extra(TLRegisterRouterExtra)
a_extra.source := a.bits.source
a_extra.size := a.bits.size
// Invoke the register map builder
val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*)
// No flow control needed
in.valid := a.valid
a.ready := in.ready
d.valid := out.valid
out.ready := d.ready
// We must restore the size to enable width adapters to work
val d_extra = out.bits.extra(TLRegisterRouterExtra)
d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size)
// avoid a Mux on the data bus by manually overriding two fields
d.bits.data := out.bits.data
Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match {
case (lhs, rhs) => lhs :<= rhs
}
d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck)
// Tie off unused channels
bundleIn.b.valid := false.B
bundleIn.c.ready := true.B
bundleIn.e.ready := true.B
genRegDescsJson(mapping:_*)
}
def genRegDescsJson(mapping: RegField.Map*): Unit = {
// Dump out the register map for documentation purposes.
val base = address.head.base
val baseHex = s"0x${base.toInt.toHexString}"
val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}"
val json = GenRegDescsAnno.serialize(base, name, mapping:_*)
var suffix = 0
while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) {
suffix = suffix + 1
}
ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json)
val module = Module.currentModule.get.asInstanceOf[RawModule]
GenRegDescsAnno.anno(
module,
base,
mapping:_*)
}
}
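// Usage sketch: a minimal standalone device built directly on TLRegisterNode. The class
// name, register names, and address are illustrative assumptions; RegField, SimpleDevice,
// LazyModule and LazyModuleImp are assumed to be in scope via the usual regmapper/diplomacy imports.
/*
class ExampleRegBlock(beatBytes: Int)(implicit p: Parameters) extends LazyModule {
  val device = new SimpleDevice("example-reg-block", Seq("acme,example"))
  val node = TLRegisterNode(
    address   = Seq(AddressSet(0x10000, 0xfff)),
    device    = device,
    beatBytes = beatBytes)
  lazy val module = new LazyModuleImp(this) {
    val scratch = RegInit(0.U(32.W))
    node.regmap(
      0x00 -> Seq(RegField(32, scratch)),     // read/write register at offset 0x00
      0x04 -> Seq(RegField.r(32, scratch)))   // read-only alias at offset 0x04
  }
}
*/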
/** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink.
* - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers.
* - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect.
* - Use the mapping helper function regmap to internally fill out the space of device control registers.
*/
trait HasTLControlRegMap { this: RegisterRouter =>
protected val controlNode = TLRegisterNode(
address = address,
device = device,
deviceKey = "reg/control",
concurrency = concurrency,
beatBytes = beatBytes,
undefZero = undefZero,
executable = executable)
// Externally, this helper should be used to connect the register control port to a bus
val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode)
// Backwards-compatibility default node accessor with no clock crossing
lazy val node: TLInwardNode = controlXing(NoCrossing)
// Internally, this function should be used to populate the control port with registers
protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) }
}
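// Attachment sketch: from the integrator's side, `node` is used for same-domain attachment
// and `controlXing` when the device sits in its own clock domain. `controlBus`, `myDevice`,
// and the choice of AsynchronousCrossing() are illustrative assumptions.
/*
controlBus.coupleTo("my-device") { myDevice.node := _ }                                 // NoCrossing default
controlBus.coupleTo("my-device") { myDevice.controlXing(AsynchronousCrossing()) := _ }  // explicit crossing
*/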
File TileClockGater.scala:
package chipyard.clocking
import chisel3._
import chisel3.util._
import chisel3.experimental.Analog
import org.chipsalliance.cde.config._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.prci._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.subsystem._
/** This node adds clock gating control registers.
 * If deploying on a platform which does not support clock gating, deasserting the enable
 * flag will still generate the registers, preserving the same memory map and behavior, but
 * will not generate any gaters.
*/
class TileClockGater(address: BigInt, beatBytes: Int)(implicit p: Parameters, valName: ValName) extends LazyModule
{
val device = new SimpleDevice(s"clock-gater", Nil)
val clockNode = ClockGroupIdentityNode()
val tlNode = TLRegisterNode(Seq(AddressSet(address, 4096-1)), device, "reg/control", beatBytes=beatBytes)
lazy val module = new LazyModuleImp(this) {
val sources = clockNode.in.head._1.member.data.toSeq
val sinks = clockNode.out.head._1.member.elements.toSeq
val nSinks = sinks.size
val regs = (0 until nSinks).map({i =>
val sinkName = sinks(i)._1
val reg = withReset(sources(i).reset) { Module(new AsyncResetRegVec(w=1, init=1)) }
if (sinkName.contains("tile")) {
println(s"${(address+i*4).toString(16)}: Tile $sinkName clock gate")
sinks(i)._2.clock := ClockGate(sources(i).clock, reg.io.q.asBool)
sinks(i)._2.reset := sources(i).reset
} else {
sinks(i)._2 := sources(i)
}
reg
})
tlNode.regmap((0 until nSinks).map({i =>
i*4 -> Seq(RegField.rwReg(1, regs(i).io))
}): _*)
}
}
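// Resulting register map: each clock-group member gets a 1-bit enable register at offset
// i*4 (reset value 1); only members whose sink name contains "tile" actually have their
// clocks gated by that bit. A possible instantiation is sketched below; the bus and
// clock-group names (`cbus`, `upstreamGroups`, `downstreamGroups`) are assumptions.
/*
val gater = LazyModule(new TileClockGater(0x2100000, cbus.beatBytes))
cbus.coupleTo("tile-clock-gater") { gater.tlNode := TLFragmenter(cbus.beatBytes, cbus.blockBytes) := _ }
downstreamGroups := gater.clockNode := upstreamGroups
*/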
File MuxLiteral.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.log2Ceil
import scala.reflect.ClassTag
/* MuxLiteral creates a lookup table from a key to a list of values.
* Unlike MuxLookup, the table keys must be exclusive literals.
*/
object MuxLiteral
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (UInt, T), rest: (UInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(UInt, T)]): T =
MuxTable(index, default, cases.map { case (k, v) => (k.litValue, v) })
}
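// Usage sketch: keys must be literal UInts, values may be arbitrary hardware.
// `sel`, `a`, and `b` are illustrative signals.
/*
val out = MuxLiteral(sel, 0.U(8.W), 1.U -> a, 3.U -> b)  // sel==1 -> a, sel==3 -> b, else 0
*/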
object MuxSeq
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: T, rest: T*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[T]): T =
MuxTable(index, default, cases.zipWithIndex.map { case (v, i) => (BigInt(i), v) })
}
object MuxTable
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (BigInt, T), rest: (BigInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(BigInt, T)]): T = {
/* All keys must be >= 0 and distinct */
cases.foreach { case (k, _) => require (k >= 0) }
require (cases.map(_._1).distinct.size == cases.size)
/* Filter out any cases identical to the default */
val simple = cases.filter { case (k, v) => !default.isLit || !v.isLit || v.litValue != default.litValue }
val maxKey = (BigInt(0) +: simple.map(_._1)).max
val endIndex = BigInt(1) << log2Ceil(maxKey+1)
if (simple.isEmpty) {
default
} else if (endIndex <= 2*simple.size) {
/* The dense encoding case uses a Vec */
val table = Array.fill(endIndex.toInt) { default }
simple.foreach { case (k, v) => table(k.toInt) = v }
Mux(index >= endIndex.U, default, VecInit(table)(index))
} else {
/* The sparse encoding case uses switch */
val out = WireDefault(default)
simple.foldLeft(new chisel3.util.SwitchContext(index, None, Set.empty)) { case (acc, (k, v)) =>
acc.is (k.U) { out := v }
}
out
}
}
}
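// Encoding-choice sketch: with keys 0..3 the table is dense (endIndex = 4 <= 2*4), so a Vec
// lookup is emitted; with keys {0, 200} it is sparse (endIndex = 256 > 2*2), so a switch is
// emitted instead. `idx`, `a`, `b`, `c`, `d` are illustrative signals.
/*
val dense  = MuxTable(idx, 0.U, Seq(BigInt(0) -> a, BigInt(1) -> b, BigInt(2) -> c, BigInt(3) -> d))
val sparse = MuxTable(idx, 0.U, Seq(BigInt(0) -> a, BigInt(200) -> b))
*/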
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** instantiate this [[LazyModule]], return the [[AutoBundle]] and unconnected [[Dangle]]s from this module and
    * its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
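// Usage sketch: a LazyRawModuleImp that derives its children's implicit clock and reset from
// explicit ports. The class and IO names are illustrative; overriding
// provideImplicitClockToLazyChildren makes childClock/childReset the implicit clock and reset
// of any lazily instantiated children.
/*
class ExternallyClockedImp(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
  override def provideImplicitClockToLazyChildren = true
  val io = IO(new Bundle {
    val clk = Input(Clock())
    val rst = Input(AsyncReset())
  })
  childClock := io.clk
  childReset := io.rst
}
*/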
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 * It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 * interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: source is processed by a function and the result is passed to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
 * connections which need to be resolved to determine how many actual edges they correspond to. We also
 * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
 * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
 * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
 * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
 * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
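  // Usage sketch: inside the enclosing LazyModule's module implementation, the negotiated
  // hardware and edge parameters are retrieved through `in`/`out`. `myNode` is an
  // illustrative node on the enclosing LazyModule.
  /*
  lazy val module = new LazyModuleImp(this) {
    val (inBundle,  inEdge)  = myNode.in(0)    // port hardware + inward edge parameters
    val (outBundle, outEdge) = myNode.out(0)   // port hardware + outward edge parameters
    // ... drive outBundle from inBundle using the negotiated edge parameters ...
  }
  */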
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
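  // Usage sketch: tracking beats of multi-beat messages with the helpers above. The names
  // `edge`, `tl`, `reqAddr`, and `busy` are illustrative signals from surrounding user logic.
  /*
  val (a_first, a_last, a_done) = edge.firstlast(tl.a)
  when (tl.a.fire && a_first) { reqAddr := edge.address(tl.a.bits) } // capture state on beat 0
  when (a_done)               { busy := false.B }                    // last beat accepted this cycle
  */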
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
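// Illustrative sketch (not part of the original file): a client-side module holding a
// TLEdgeOut `edge` and a client TLBundle `tl` would typically drive channel A with the
// helpers above, e.g.
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   tl.a.valid := wantRead && legal
//   tl.a.bits  := get
// `edge`, `tl`, `addr`, and `wantRead` are assumed names from the surrounding design; the
// (legal, bits) pair mirrors the return type of the Get/Put/Acquire* constructors above.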
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
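// Illustrative sketch (not part of the original file): on the manager side a TLEdgeIn
// `edge` is commonly used to turn a channel A request into a channel D response, e.g.
//   tl.d.bits := edge.AccessAck(tl.a.bits, readData) // AccessAckData reply to a Get
// with the ready/valid handshaking elided; `edge`, `tl`, and `readData` are assumed names.
// AccessAck(a, data) above copies the source and size fields from the request.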
| module TileClockGater( // @[TileClockGater.scala:27:25]
input clock, // @[TileClockGater.scala:27:25]
input reset, // @[TileClockGater.scala:27:25]
output auto_clock_gater_in_1_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_clock_gater_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_clock_gater_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_clock_gater_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_clock_gater_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [20:0] auto_clock_gater_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_clock_gater_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_clock_gater_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_in_1_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_clock_gater_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_clock_gater_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_clock_gater_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_clock_gater_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_0_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_0_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_out_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_out_member_allClocks_uncore_reset // @[LazyModuleImp.scala:107:25]
);
wire out_front_valid; // @[RegisterRouter.scala:87:24]
wire out_front_ready; // @[RegisterRouter.scala:87:24]
wire out_bits_read; // @[RegisterRouter.scala:87:24]
wire [10:0] out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [8:0] in_bits_index; // @[RegisterRouter.scala:73:18]
wire in_bits_read; // @[RegisterRouter.scala:73:18]
wire auto_clock_gater_in_1_a_valid_0 = auto_clock_gater_in_1_a_valid; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_a_bits_opcode_0 = auto_clock_gater_in_1_a_bits_opcode; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_a_bits_param_0 = auto_clock_gater_in_1_a_bits_param; // @[TileClockGater.scala:27:25]
wire [1:0] auto_clock_gater_in_1_a_bits_size_0 = auto_clock_gater_in_1_a_bits_size; // @[TileClockGater.scala:27:25]
wire [10:0] auto_clock_gater_in_1_a_bits_source_0 = auto_clock_gater_in_1_a_bits_source; // @[TileClockGater.scala:27:25]
wire [20:0] auto_clock_gater_in_1_a_bits_address_0 = auto_clock_gater_in_1_a_bits_address; // @[TileClockGater.scala:27:25]
wire [7:0] auto_clock_gater_in_1_a_bits_mask_0 = auto_clock_gater_in_1_a_bits_mask; // @[TileClockGater.scala:27:25]
wire [63:0] auto_clock_gater_in_1_a_bits_data_0 = auto_clock_gater_in_1_a_bits_data; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_a_bits_corrupt_0 = auto_clock_gater_in_1_a_bits_corrupt; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_ready_0 = auto_clock_gater_in_1_d_ready; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_0_member_allClocks_uncore_clock_0 = auto_clock_gater_in_0_member_allClocks_uncore_clock; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_0_member_allClocks_uncore_reset_0 = auto_clock_gater_in_0_member_allClocks_uncore_reset; // @[TileClockGater.scala:27:25]
wire [1:0] _out_frontSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _out_backSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [8:0] out_maskMatch = 9'h1FF; // @[RegisterRouter.scala:87:24]
wire out_frontSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_backSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_rofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_iready = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_oready = 1'h1; // @[RegisterRouter.scala:87:24]
wire [2:0] clock_gaterIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [63:0] clock_gaterIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire auto_clock_gater_in_1_d_bits_sink = 1'h0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_bits_denied = 1'h0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_bits_corrupt = 1'h0; // @[TileClockGater.scala:27:25]
wire clock_gaterIn_1_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire out_frontSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire out_backSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wifireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_rofireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wofireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T_2 = 1'h0; // @[MuxLiteral.scala:49:17]
wire clock_gaterIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire [1:0] auto_clock_gater_in_1_d_bits_param = 2'h0; // @[TileClockGater.scala:27:25]
wire clock_gaterIn_1_a_ready; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_1_a_valid = auto_clock_gater_in_1_a_valid_0; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_a_bits_opcode = auto_clock_gater_in_1_a_bits_opcode_0; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_a_bits_param = auto_clock_gater_in_1_a_bits_param_0; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_a_bits_size = auto_clock_gater_in_1_a_bits_size_0; // @[MixedNode.scala:551:17]
wire [10:0] clock_gaterIn_1_a_bits_source = auto_clock_gater_in_1_a_bits_source_0; // @[MixedNode.scala:551:17]
wire [20:0] clock_gaterIn_1_a_bits_address = auto_clock_gater_in_1_a_bits_address_0; // @[MixedNode.scala:551:17]
wire [7:0] clock_gaterIn_1_a_bits_mask = auto_clock_gater_in_1_a_bits_mask_0; // @[MixedNode.scala:551:17]
wire [63:0] clock_gaterIn_1_a_bits_data = auto_clock_gater_in_1_a_bits_data_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_a_bits_corrupt = auto_clock_gater_in_1_a_bits_corrupt_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_ready = auto_clock_gater_in_1_d_ready_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_d_bits_size; // @[MixedNode.scala:551:17]
wire [10:0] clock_gaterIn_1_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] clock_gaterIn_1_d_bits_data; // @[MixedNode.scala:551:17]
wire clock_gaterIn_member_allClocks_uncore_clock = auto_clock_gater_in_0_member_allClocks_uncore_clock_0; // @[MixedNode.scala:551:17]
wire clock_gaterOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
wire clock_gaterIn_member_allClocks_uncore_reset = auto_clock_gater_in_0_member_allClocks_uncore_reset_0; // @[MixedNode.scala:551:17]
wire clock_gaterOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
wire auto_clock_gater_in_1_a_ready_0; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_d_bits_opcode_0; // @[TileClockGater.scala:27:25]
wire [1:0] auto_clock_gater_in_1_d_bits_size_0; // @[TileClockGater.scala:27:25]
wire [10:0] auto_clock_gater_in_1_d_bits_source_0; // @[TileClockGater.scala:27:25]
wire [63:0] auto_clock_gater_in_1_d_bits_data_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_valid_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_out_member_allClocks_uncore_clock_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_out_member_allClocks_uncore_reset_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_clock_0 = clock_gaterOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
assign auto_clock_gater_out_member_allClocks_uncore_reset_0 = clock_gaterOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
assign clock_gaterOut_member_allClocks_uncore_clock = clock_gaterIn_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17, :551:17]
assign clock_gaterOut_member_allClocks_uncore_reset = clock_gaterIn_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17, :551:17]
wire in_ready; // @[RegisterRouter.scala:73:18]
assign auto_clock_gater_in_1_a_ready_0 = clock_gaterIn_1_a_ready; // @[MixedNode.scala:551:17]
wire in_valid = clock_gaterIn_1_a_valid; // @[RegisterRouter.scala:73:18]
wire [1:0] in_bits_extra_tlrr_extra_size = clock_gaterIn_1_a_bits_size; // @[RegisterRouter.scala:73:18]
wire [10:0] in_bits_extra_tlrr_extra_source = clock_gaterIn_1_a_bits_source; // @[RegisterRouter.scala:73:18]
wire [7:0] in_bits_mask = clock_gaterIn_1_a_bits_mask; // @[RegisterRouter.scala:73:18]
wire [63:0] in_bits_data = clock_gaterIn_1_a_bits_data; // @[RegisterRouter.scala:73:18]
wire out_ready = clock_gaterIn_1_d_ready; // @[RegisterRouter.scala:87:24]
wire out_valid; // @[RegisterRouter.scala:87:24]
assign auto_clock_gater_in_1_d_valid_0 = clock_gaterIn_1_d_valid; // @[MixedNode.scala:551:17]
assign auto_clock_gater_in_1_d_bits_opcode_0 = clock_gaterIn_1_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_d_bits_d_size; // @[Edges.scala:792:17]
assign auto_clock_gater_in_1_d_bits_size_0 = clock_gaterIn_1_d_bits_size; // @[MixedNode.scala:551:17]
wire [10:0] clock_gaterIn_d_bits_d_source; // @[Edges.scala:792:17]
assign auto_clock_gater_in_1_d_bits_source_0 = clock_gaterIn_1_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] out_bits_data; // @[RegisterRouter.scala:87:24]
assign auto_clock_gater_in_1_d_bits_data_0 = clock_gaterIn_1_d_bits_data; // @[MixedNode.scala:551:17]
wire _out_in_ready_T; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_a_ready = in_ready; // @[RegisterRouter.scala:73:18]
wire _in_bits_read_T; // @[RegisterRouter.scala:74:36]
wire _out_front_valid_T = in_valid; // @[RegisterRouter.scala:73:18, :87:24]
wire out_front_bits_read = in_bits_read; // @[RegisterRouter.scala:73:18, :87:24]
wire [8:0] out_front_bits_index = in_bits_index; // @[RegisterRouter.scala:73:18, :87:24]
wire [63:0] out_front_bits_data = in_bits_data; // @[RegisterRouter.scala:73:18, :87:24]
wire [7:0] out_front_bits_mask = in_bits_mask; // @[RegisterRouter.scala:73:18, :87:24]
wire [10:0] out_front_bits_extra_tlrr_extra_source = in_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:73:18, :87:24]
wire [1:0] out_front_bits_extra_tlrr_extra_size = in_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:73:18, :87:24]
assign _in_bits_read_T = clock_gaterIn_1_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36]
assign in_bits_read = _in_bits_read_T; // @[RegisterRouter.scala:73:18, :74:36]
wire [17:0] _in_bits_index_T = clock_gaterIn_1_a_bits_address[20:3]; // @[Edges.scala:192:34]
assign in_bits_index = _in_bits_index_T[8:0]; // @[RegisterRouter.scala:73:18, :75:19]
wire _out_front_ready_T = out_ready; // @[RegisterRouter.scala:87:24]
wire _out_out_valid_T; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_d_valid = out_valid; // @[RegisterRouter.scala:87:24]
wire _clock_gaterIn_d_bits_opcode_T = out_bits_read; // @[RegisterRouter.scala:87:24, :105:25]
assign clock_gaterIn_1_d_bits_data = out_bits_data; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_d_bits_d_source = out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [1:0] out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_d_bits_d_size = out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign _out_in_ready_T = out_front_ready; // @[RegisterRouter.scala:87:24]
assign _out_out_valid_T = out_front_valid; // @[RegisterRouter.scala:87:24]
assign out_bits_read = out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire [8:0] out_findex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
wire [8:0] out_bindex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_source = out_front_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_size = out_front_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
wire _out_T = out_findex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_1 = out_bindex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_0 = _out_T_1; // @[MuxLiteral.scala:49:48]
wire out_rivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_wivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire out_roready_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_woready_0; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_8 = {8{_out_frontMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_9 = {8{_out_frontMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_10 = {8{_out_frontMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_11 = {8{_out_frontMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_12 = {8{_out_frontMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_13 = {8{_out_frontMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_14 = {8{_out_frontMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_15 = {8{_out_frontMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_lo = {_out_frontMask_T_9, _out_frontMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_hi = {_out_frontMask_T_11, _out_frontMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_lo = {out_frontMask_lo_hi, out_frontMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_lo = {_out_frontMask_T_13, _out_frontMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_hi = {_out_frontMask_T_15, _out_frontMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_hi = {out_frontMask_hi_hi, out_frontMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_frontMask = {out_frontMask_hi, out_frontMask_lo}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_8 = {8{_out_backMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_9 = {8{_out_backMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_10 = {8{_out_backMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_11 = {8{_out_backMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_12 = {8{_out_backMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_13 = {8{_out_backMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_14 = {8{_out_backMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_15 = {8{_out_backMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_lo = {_out_backMask_T_9, _out_backMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_hi = {_out_backMask_T_11, _out_backMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_lo = {out_backMask_lo_hi, out_backMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_lo = {_out_backMask_T_13, _out_backMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_hi = {_out_backMask_T_15, _out_backMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_hi = {out_backMask_hi_hi, out_backMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_backMask = {out_backMask_hi, out_backMask_lo}; // @[RegisterRouter.scala:87:24]
wire _out_rimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_wimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire out_rimask = _out_rimask_T; // @[RegisterRouter.scala:87:24]
wire out_wimask = _out_wimask_T; // @[RegisterRouter.scala:87:24]
wire _out_romask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_womask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire out_romask = _out_romask_T; // @[RegisterRouter.scala:87:24]
wire out_womask = _out_womask_T; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid = out_rivalid_0 & out_rimask; // @[RegisterRouter.scala:87:24]
wire out_f_roready = out_roready_0 & out_romask; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid = out_wivalid_0 & out_wimask; // @[RegisterRouter.scala:87:24]
wire out_f_woready = out_woready_0 & out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_2 = out_front_bits_data[0]; // @[RegisterRouter.scala:87:24]
wire _out_T_3 = ~out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_4 = ~out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_5 = ~out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_6 = ~out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_7; // @[RegisterRouter.scala:87:24]
wire _out_T_8 = _out_T_7; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_1_0 = _out_T_8; // @[MuxLiteral.scala:49:48]
wire _GEN = in_valid & out_front_ready; // @[RegisterRouter.scala:73:18, :87:24]
wire _out_rifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_1 = _out_rifireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_2 = _out_rifireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_3 = _out_rifireMux_T_2 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_rivalid_0 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_4 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_2 = _out_wifireMux_T & _out_wifireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_3 = _out_wifireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_4 = _out_wifireMux_T_3 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_wivalid_0 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_5 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _GEN_0 = out_front_valid & out_ready; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_1 = _out_rofireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_2 = _out_rofireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_3 = _out_rofireMux_T_2 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_roready_0 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_4 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_2 = _out_wofireMux_T & _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_3 = _out_wofireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_4 = _out_wofireMux_T_3 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_woready_0 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_5 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
assign in_ready = _out_in_ready_T; // @[RegisterRouter.scala:73:18, :87:24]
assign out_front_valid = _out_front_valid_T; // @[RegisterRouter.scala:87:24]
assign out_front_ready = _out_front_ready_T; // @[RegisterRouter.scala:87:24]
assign out_valid = _out_out_valid_T; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_T_1 = _out_out_bits_data_WIRE_0; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_3 = _out_out_bits_data_WIRE_1_0; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_4 = _out_out_bits_data_T_1 & _out_out_bits_data_T_3; // @[MuxLiteral.scala:49:10]
assign out_bits_data = {63'h0, _out_out_bits_data_T_4}; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_d_bits_size = clock_gaterIn_d_bits_d_size; // @[Edges.scala:792:17]
assign clock_gaterIn_1_d_bits_source = clock_gaterIn_d_bits_d_source; // @[Edges.scala:792:17]
assign clock_gaterIn_1_d_bits_opcode = {2'h0, _clock_gaterIn_d_bits_opcode_T}; // @[RegisterRouter.scala:105:{19,25}]
TLMonitor_59 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (clock_gaterIn_1_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (clock_gaterIn_1_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (clock_gaterIn_1_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (clock_gaterIn_1_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (clock_gaterIn_1_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (clock_gaterIn_1_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (clock_gaterIn_1_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (clock_gaterIn_1_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (clock_gaterIn_1_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (clock_gaterIn_1_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (clock_gaterIn_1_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (clock_gaterIn_1_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (clock_gaterIn_1_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (clock_gaterIn_1_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (clock_gaterIn_1_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (clock_gaterIn_1_d_bits_data) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
AsyncResetRegVec_w1_i1 regs_0 ( // @[TileClockGater.scala:33:53]
.clock (clock),
.reset (clock_gaterIn_member_allClocks_uncore_reset), // @[MixedNode.scala:551:17]
.io_d (_out_T_2), // @[RegisterRouter.scala:87:24]
.io_q (_out_T_7),
.io_en (out_f_woready) // @[RegisterRouter.scala:87:24]
); // @[TileClockGater.scala:33:53]
assign auto_clock_gater_in_1_a_ready = auto_clock_gater_in_1_a_ready_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_valid = auto_clock_gater_in_1_d_valid_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_opcode = auto_clock_gater_in_1_d_bits_opcode_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_size = auto_clock_gater_in_1_d_bits_size_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_source = auto_clock_gater_in_1_d_bits_source_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_data = auto_clock_gater_in_1_d_bits_data_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_clock = auto_clock_gater_out_member_allClocks_uncore_clock_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_reset = auto_clock_gater_out_member_allClocks_uncore_reset_0; // @[TileClockGater.scala:27:25]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RouteComputer.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.experimental.decode.{TruthTable, decoder}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import freechips.rocketchip.rocket.DecodeLogic
import constellation.channel._
import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo}
import constellation.noc.{HasNoCParams}
class RouteComputerReq(implicit val p: Parameters) extends Bundle with HasNoCParams {
val src_virt_id = UInt(virtualChannelBits.W)
val flow = new FlowRoutingBundle
}
class RouteComputerResp(
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle
with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
class RouteComputer(
val routerParams: RouterParams,
val inParams: Seq[ChannelParams],
val outParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams
with HasNoCParams {
val io = IO(new Bundle {
val req = MixedVec(allInParams.map { u => Flipped(Decoupled(new RouteComputerReq)) })
val resp = MixedVec(allInParams.map { u => Output(new RouteComputerResp(outParams, egressParams)) })
})
(io.req zip io.resp).zipWithIndex.map { case ((req, resp), i) =>
req.ready := true.B
if (outParams.size == 0) {
assert(!req.valid)
resp.vc_sel := DontCare
} else {
def toUInt(t: (Int, FlowRoutingInfo)): UInt = {
val l2 = (BigInt(t._1) << req.bits.flow.vnet_id .getWidth) | t._2.vNetId
val l3 = ( l2 << req.bits.flow.ingress_node .getWidth) | t._2.ingressNode
val l4 = ( l3 << req.bits.flow.ingress_node_id.getWidth) | t._2.ingressNodeId
val l5 = ( l4 << req.bits.flow.egress_node .getWidth) | t._2.egressNode
val l6 = ( l5 << req.bits.flow.egress_node_id .getWidth) | t._2.egressNodeId
l6.U(req.bits.getWidth.W)
}
val flow = req.bits.flow
val table = allInParams(i).possibleFlows.toSeq.distinct.map { pI =>
allInParams(i).channelRoutingInfos.map { cI =>
var row: String = "b"
(0 until nOutputs).foreach { o =>
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
row = row + (if (routingRelation(cI, outParams(o).channelRoutingInfos(outVId), pI)) "1" else "0")
}
}
((cI.vc, pI), row)
}
}.flatten
val addr = req.bits.asUInt
val width = outParams.map(_.nVirtualChannels).reduce(_+_)
val decoded = if (table.size > 0) {
val truthTable = TruthTable(
table.map { e => (BitPat(toUInt(e._1)), BitPat(e._2)) },
BitPat("b" + "?" * width)
)
Reverse(decoder(addr, truthTable))
} else {
0.U(width.W)
}
var idx = 0
(0 until nAllOutputs).foreach { o =>
if (o < nOutputs) {
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
resp.vc_sel(o)(outVId) := decoded(idx)
idx += 1
}
} else {
resp.vc_sel(o)(0) := false.B
}
}
}
}
}
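// Reading note (not part of the original file): each truth-table row string above is built
// most-significant-bit first over (output channel, output VC) pairs, and the decoder output
// is Reverse()d before being unpacked, so decoded(idx) lines up with the same (o, outVId)
// order used to build the row. With two 2-VC outputs, a row of "b0110" would then assert
// vc_sel(0)(1) and vc_sel(1)(0) for the matching (vc, flow) input. This is an interpretation
// of the code, not a guarantee; check the generated decoder before relying on it.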
| module RouteComputer_27( // @[RouteComputer.scala:29:7]
input io_req_1_bits_src_virt_id, // @[RouteComputer.scala:40:14]
input io_req_1_bits_flow_vnet_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_1_bits_flow_ingress_node, // @[RouteComputer.scala:40:14]
input [1:0] io_req_1_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_1_bits_flow_egress_node, // @[RouteComputer.scala:40:14]
input [1:0] io_req_1_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14]
input io_req_0_bits_src_virt_id, // @[RouteComputer.scala:40:14]
input io_req_0_bits_flow_vnet_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_0_bits_flow_ingress_node, // @[RouteComputer.scala:40:14]
input [1:0] io_req_0_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_0_bits_flow_egress_node, // @[RouteComputer.scala:40:14]
input [1:0] io_req_0_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_2_0, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_2_1, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_0, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_1, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_0, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_1, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_2_0, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_2_1, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_1_0, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_1_1, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_0_0, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_0_1 // @[RouteComputer.scala:40:14]
);
wire [11:0] decoded_invInputs = ~{io_req_0_bits_flow_ingress_node, io_req_0_bits_flow_ingress_node_id, io_req_0_bits_flow_egress_node, io_req_0_bits_flow_egress_node_id}; // @[pla.scala:78:21]
wire [10:0] decoded_invInputs_1 = ~{io_req_1_bits_flow_ingress_node[2:0], io_req_1_bits_flow_ingress_node_id, io_req_1_bits_flow_egress_node, io_req_1_bits_flow_egress_node_id}; // @[pla.scala:78:21]
assign io_resp_1_vc_sel_2_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_2_1 = |{&{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[2], io_req_1_bits_flow_egress_node[3], decoded_invInputs_1[6], decoded_invInputs_1[7], io_req_1_bits_flow_ingress_node[0], decoded_invInputs_1[9], decoded_invInputs_1[10], io_req_1_bits_flow_ingress_node[3], io_req_1_bits_flow_vnet_id}, &{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[2], io_req_1_bits_flow_egress_node[3], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], io_req_1_bits_flow_ingress_node[1], decoded_invInputs_1[10], io_req_1_bits_flow_ingress_node[3], io_req_1_bits_flow_vnet_id}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_1_vc_sel_1_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_1_1 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_1 = |{&{decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], io_req_1_bits_flow_ingress_node[0], decoded_invInputs_1[9], decoded_invInputs_1[10], io_req_1_bits_flow_ingress_node[3], io_req_1_bits_flow_vnet_id}, &{decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], io_req_1_bits_flow_ingress_node[1], decoded_invInputs_1[10], io_req_1_bits_flow_ingress_node[3], io_req_1_bits_flow_vnet_id}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_0_vc_sel_2_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_0_vc_sel_2_1 = |{&{decoded_invInputs[0], io_req_0_bits_flow_egress_node[2], io_req_0_bits_flow_egress_node[3], decoded_invInputs[6], decoded_invInputs[7], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[9], io_req_0_bits_flow_ingress_node[2], decoded_invInputs[11], io_req_0_bits_flow_vnet_id}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node[2], io_req_0_bits_flow_egress_node[3], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[1], io_req_0_bits_flow_ingress_node[2], decoded_invInputs[11], io_req_0_bits_flow_vnet_id}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_0_vc_sel_1_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_0_vc_sel_1_1 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_0_vc_sel_0_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_0_vc_sel_0_1 = 1'h0; // @[RouteComputer.scala:29:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
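// Illustrative note (not part of the original file): in the base case above,
// lowMask(in, topBound, bottomBound) with topBound > bottomBound appears to produce a
// (topBound - bottomBound)-bit thermometer mask whose bit m is set when in > bottomBound + m;
// e.g. for a 3-bit in = 3.U, lowMask(in, 4, 1) would be "b011". Treat this as a reading of
// the code rather than a specification.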
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
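// Illustrative note (not part of the original file): countLeadingZeros returns the position
// of the most-significant set bit counted from the top, i.e. the number of leading zeros;
// for an 8-bit in = "b00010110".U it yields 3.U (the all-zero case is left to
// PriorityEncoder's behaviour).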
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
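// Illustrative note (not part of the original file): orReduceBy2/orReduceBy4 OR together
// adjacent groups of 2 or 4 input bits. For a 10-bit input, orReduceBy4 yields a 3-bit
// result with bit 0 = in(3,0).orR, bit 1 = in(7,4).orR and bit 2 = in(9,8).orR (the final
// group takes whatever bits remain).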
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
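// Illustrative note (not part of the original file): roundMagUp covers the two cases that
// grow the magnitude of the result, namely rounding toward -inf (round_min) on a negative
// input and rounding toward +inf (round_max) on a positive one.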
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
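// Illustrative note (not part of the original file): each branch of sAdjustedExp effectively
// adds (1<<outExpWidth) - (1<<inExpWidth) to sExp (zero when the widths match): positive when
// widening, negative when narrowing; e.g. going from expWidth 8 to expWidth 11 adds 1792.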
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
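//----------------------------------------------------------------------------
// Usage sketch (not part of the original hardfloat sources; the class name and
// wiring here are illustrative assumptions): rounding a raw float to the
// recoded format with round-to-nearest-even and after-rounding tininess.
//----------------------------------------------------------------------------
class
    RoundRawFNToRecFNExample(expWidth: Int, sigWidth: Int)
    extends RawModule
{
    val io = IO(new Bundle {
        val in = Input(new RawFloat(expWidth, sigWidth + 2))
        val out = Output(Bits((expWidth + sigWidth + 1).W))
        val exceptionFlags = Output(Bits(5.W))
    })
    // No special exception overrides; plain rounding of 'in'.
    val rounder = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
    rounder.io.invalidExc := false.B
    rounder.io.infiniteExc := false.B
    rounder.io.in := io.in
    rounder.io.roundingMode := round_near_even
    rounder.io.detectTininess := tininess_afterRounding
    io.out := rounder.io.out
    io.exceptionFlags := rounder.io.exceptionFlags
}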
| module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_123( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20]
wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53]
wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38]
wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29]
wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42]
wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49]
wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50]
wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31]
wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40]
wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37]
wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21]
wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25]
wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26]
wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26]
wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26]
wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56]
wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22]
wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // @[primitives.scala:77:20, :78:22]
wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20]
wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22]
wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20]
wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20]
wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20]
wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20]
wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20]
wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20]
wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20]
wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_60 = ~_roundMask_T_59; // @[primitives.scala:73:32, :77:20]
wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}]
wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}]
wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17]
wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56]
wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22]
wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22]
wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20]
wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20]
wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22]
wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20]
wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20]
wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58]
wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24]
wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24]
wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}]
wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41]
wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}]
wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28]
wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}]
wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67]
wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49]
wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31]
wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35]
wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? _roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32]
wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30]
assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}]
assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50]
assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31]
assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61]
wire unboundedRange_roundPosBit = doShiftSigDown1 ? _unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}]
wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67]
wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27]
wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49]
wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}]
wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}]
wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57]
wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49]
wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71]
wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}]
wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30]
wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49]
wire _common_underflow_T_10 = doShiftSigDown1 ? _common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49]
wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}]
wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34]
wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38]
wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45]
wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}]
wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60]
wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27]
assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76]
assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40]
assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34]
wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22]
wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36]
wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}]
wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32]
wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45]
wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}]
wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20]
wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}]
wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22]
wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18]
wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}]
wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14]
wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16]
wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16]
wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16]
wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16]
wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22]
wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}]
wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16]
wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23]
wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}]
wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File AsyncCrossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, NodeHandle}
import freechips.rocketchip.prci.{AsynchronousCrossing}
import freechips.rocketchip.subsystem.CrossingWrapper
import freechips.rocketchip.util.{AsyncQueueParams, ToAsyncBundle, FromAsyncBundle, Pow2ClockDivider, property}
class TLAsyncCrossingSource(sync: Option[Int])(implicit p: Parameters) extends LazyModule
{
def this(x: Int)(implicit p: Parameters) = this(Some(x))
def this()(implicit p: Parameters) = this(None)
val node = TLAsyncSourceNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLAsyncCrossingSource") ++ node.in.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val bce = edgeIn.manager.anySupportAcquireB && edgeIn.client.anySupportProbe
val psync = sync.getOrElse(edgeOut.manager.async.sync)
val params = edgeOut.manager.async.copy(sync = psync)
out.a <> ToAsyncBundle(in.a, params)
in.d <> FromAsyncBundle(out.d, psync)
property.cover(in.a, "TL_ASYNC_CROSSING_SOURCE_A", "MemorySystem;;TLAsyncCrossingSource Channel A")
property.cover(in.d, "TL_ASYNC_CROSSING_SOURCE_D", "MemorySystem;;TLAsyncCrossingSource Channel D")
if (bce) {
in.b <> FromAsyncBundle(out.b, psync)
out.c <> ToAsyncBundle(in.c, params)
out.e <> ToAsyncBundle(in.e, params)
property.cover(in.b, "TL_ASYNC_CROSSING_SOURCE_B", "MemorySystem;;TLAsyncCrossingSource Channel B")
property.cover(in.c, "TL_ASYNC_CROSSING_SOURCE_C", "MemorySystem;;TLAsyncCrossingSource Channel C")
property.cover(in.e, "TL_ASYNC_CROSSING_SOURCE_E", "MemorySystem;;TLAsyncCrossingSource Channel E")
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ridx := 0.U
out.c.widx := 0.U
out.e.widx := 0.U
}
}
}
}
class TLAsyncCrossingSink(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) extends LazyModule
{
val node = TLAsyncSinkNode(params)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLAsyncCrossingSink") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val bce = edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe
out.a <> FromAsyncBundle(in.a, params.sync)
in.d <> ToAsyncBundle(out.d, params)
property.cover(out.a, "TL_ASYNC_CROSSING_SINK_A", "MemorySystem;;TLAsyncCrossingSink Channel A")
property.cover(out.d, "TL_ASYNC_CROSSING_SINK_D", "MemorySystem;;TLAsyncCrossingSink Channel D")
if (bce) {
in.b <> ToAsyncBundle(out.b, params)
out.c <> FromAsyncBundle(in.c, params.sync)
out.e <> FromAsyncBundle(in.e, params.sync)
property.cover(out.b, "TL_ASYNC_CROSSING_SINK_B", "MemorySystem;;TLAsyncCrossingSink Channel B")
property.cover(out.c, "TL_ASYNC_CROSSING_SINK_C", "MemorySystem;;TLAsyncCrossingSink Channel C")
property.cover(out.e, "TL_ASYNC_CROSSING_SINK_E", "MemorySystem;;TLAsyncCrossingSink Channel E")
} else {
in.b.widx := 0.U
in.c.ridx := 0.U
in.e.ridx := 0.U
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLAsyncCrossingSource
{
def apply()(implicit p: Parameters): TLAsyncSourceNode = apply(None)
def apply(sync: Int)(implicit p: Parameters): TLAsyncSourceNode = apply(Some(sync))
def apply(sync: Option[Int])(implicit p: Parameters): TLAsyncSourceNode =
{
val asource = LazyModule(new TLAsyncCrossingSource(sync))
asource.node
}
}
object TLAsyncCrossingSink
{
def apply(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) =
{
val asink = LazyModule(new TLAsyncCrossingSink(params))
asink.node
}
}
@deprecated("TLAsyncCrossing is fragile. Use TLAsyncCrossingSource and TLAsyncCrossingSink", "rocket-chip 1.2")
class TLAsyncCrossing(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) extends LazyModule
{
val source = LazyModule(new TLAsyncCrossingSource())
val sink = LazyModule(new TLAsyncCrossingSink(params))
val node = NodeHandle(source.node, sink.node)
sink.node := source.node
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val in_clock = Input(Clock())
val in_reset = Input(Bool())
val out_clock = Input(Clock())
val out_reset = Input(Bool())
})
source.module.clock := io.in_clock
source.module.reset := io.in_reset
sink.module.clock := io.out_clock
sink.module.reset := io.out_reset
}
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMAsyncCrossing(txns: Int, params: AsynchronousCrossing = AsynchronousCrossing())(implicit p: Parameters) extends LazyModule {
val model = LazyModule(new TLRAMModel("AsyncCrossing"))
val fuzz = LazyModule(new TLFuzzer(txns))
val island = LazyModule(new CrossingWrapper(params))
val ram = island { LazyModule(new TLRAM(AddressSet(0x0, 0x3ff))) }
island.crossTLIn(ram.node) := TLFragmenter(4, 256) := TLDelayer(0.1) := model.node := fuzz.node
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
// Shove the RAM into another clock domain
val clocks = Module(new Pow2ClockDivider(2))
island.module.clock := clocks.io.clock_out
}
}
class TLRAMAsyncCrossingTest(txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut_wide = Module(LazyModule(new TLRAMAsyncCrossing(txns)).module)
val dut_narrow = Module(LazyModule(new TLRAMAsyncCrossing(txns, AsynchronousCrossing(safe = false, narrow = true))).module)
io.finished := dut_wide.io.finished && dut_narrow.io.finished
dut_wide.io.start := io.start
dut_narrow.io.start := io.start
}
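// Illustrative note (an assumption, not in the original file): the companion objects above are
// the usual entry points; a crossing is typically expressed as a node chain such as
//   island { TLAsyncCrossingSink() } := TLAsyncCrossingSource() := masterNode
// with the sink placed in the destination clock domain, which is what TLRAMAsyncCrossing
// achieves indirectly through CrossingWrapper.crossTLIn.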
| module TLAsyncCrossingSource( // @[AsyncCrossing.scala:23:9]
input clock, // @[AsyncCrossing.scala:23:9]
input reset // @[AsyncCrossing.scala:23:9]
);
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
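    // Examples (illustrative, not in the original source), for a 4-bit value:
    //   "b0011".U(4.W).rotateLeft(1)  yields "b0110".U
    //   "b0011".U(4.W).rotateRight(1) yields "b1001".U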
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
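    // Worked examples (illustrative, not in the original source), with n = 8:
    //   5.U.addWrap(6.U, 8) yields 3.U   // (5 + 6) % 8
    //   2.U.subWrap(5.U, 8) yields 5.U   // (2 - 5) mod 8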
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
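  // Example (illustrative, not in the original source): UIntToOH1(3.U, 7) = 0b0000111,
  // a thermometer code of x ones; OH1ToOH of that value recovers the one-hot 0b0001000,
  // and OH1ToUInt recovers 3.U.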
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
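  // Example (illustrative, not in the original source), for a 5-bit value with only bit 2 set:
  //   leftOR (0b00100) = 0b11100   // propagate the 1 toward the high bits
  //   rightOR(0b00100) = 0b00111   // propagate the 1 toward the low bits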
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
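  // e.g. (illustrative, not in the original source):
  //   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) == Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)),
  //   with key order following first appearance rather than hash order.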
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Breakpoint.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.{CoreBundle, HasCoreParameters}
import freechips.rocketchip.util._
class BPControl(implicit p: Parameters) extends CoreBundle()(p) {
val ttype = UInt(4.W)
val dmode = Bool()
val maskmax = UInt(6.W)
val reserved = UInt((xLen - (if (coreParams.useBPWatch) 26 else 24)).W)
val action = UInt((if (coreParams.useBPWatch) 3 else 1).W)
val chain = Bool()
val zero = UInt(2.W)
val tmatch = UInt(2.W)
val m = Bool()
val h = Bool()
val s = Bool()
val u = Bool()
val x = Bool()
val w = Bool()
val r = Bool()
def tType = 2
def maskMax = 4
def enabled(mstatus: MStatus) = !mstatus.debug && Cat(m, h, s, u)(mstatus.prv)
}
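// Added note (not in the original source): enabled() indexes Cat(m, h, s, u) with the current
// privilege level, so bit 0 (u) gates user mode, bit 1 (s) supervisor mode, and bit 3 (m)
// machine mode; a breakpoint never fires while the hart is in debug mode.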
class TExtra(implicit p: Parameters) extends CoreBundle()(p) {
def mvalueBits: Int = if (xLen == 32) coreParams.mcontextWidth min 6 else coreParams.mcontextWidth min 13
def svalueBits: Int = if (xLen == 32) coreParams.scontextWidth min 16 else coreParams.scontextWidth min 34
def mselectPos: Int = if (xLen == 32) 25 else 50
def mvaluePos : Int = mselectPos + 1
def sselectPos: Int = 0
def svaluePos : Int = 2
val mvalue = UInt(mvalueBits.W)
val mselect = Bool()
val pad2 = UInt((mselectPos - svalueBits - 2).W)
val svalue = UInt(svalueBits.W)
val pad1 = UInt(1.W)
val sselect = Bool()
}
class BP(implicit p: Parameters) extends CoreBundle()(p) {
val control = new BPControl
val address = UInt(vaddrBits.W)
val textra = new TExtra
def contextMatch(mcontext: UInt, scontext: UInt) =
(if (coreParams.mcontextWidth > 0) (!textra.mselect || (mcontext(textra.mvalueBits-1,0) === textra.mvalue)) else true.B) &&
(if (coreParams.scontextWidth > 0) (!textra.sselect || (scontext(textra.svalueBits-1,0) === textra.svalue)) else true.B)
def mask(dummy: Int = 0) =
(0 until control.maskMax-1).scanLeft(control.tmatch(0))((m, i) => m && address(i)).asUInt
def pow2AddressMatch(x: UInt) =
(~x | mask()) === (~address | mask())
def rangeAddressMatch(x: UInt) =
(x >= address) ^ control.tmatch(0)
def addressMatch(x: UInt) =
Mux(control.tmatch(1), rangeAddressMatch(x), pow2AddressMatch(x))
}
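// Worked example (illustrative, not in the original source): with tmatch = "b01".U (NAPOT match)
// and an address whose low bits are ...0011, mask() evaluates to 0b0111, so pow2AddressMatch
// compares x and address with the low three bits forced to ones on both sides, i.e. it matches
// an 8-byte naturally aligned region.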
class BPWatch (val n: Int) extends Bundle() {
val valid = Vec(n, Bool())
val rvalid = Vec(n, Bool())
val wvalid = Vec(n, Bool())
val ivalid = Vec(n, Bool())
val action = UInt(3.W)
}
class BreakpointUnit(n: Int)(implicit val p: Parameters) extends Module with HasCoreParameters {
val io = IO(new Bundle {
val status = Input(new MStatus())
val bp = Input(Vec(n, new BP))
val pc = Input(UInt(vaddrBits.W))
val ea = Input(UInt(vaddrBits.W))
val mcontext = Input(UInt(coreParams.mcontextWidth.W))
val scontext = Input(UInt(coreParams.scontextWidth.W))
val xcpt_if = Output(Bool())
val xcpt_ld = Output(Bool())
val xcpt_st = Output(Bool())
val debug_if = Output(Bool())
val debug_ld = Output(Bool())
val debug_st = Output(Bool())
val bpwatch = Output(Vec(n, new BPWatch(1)))
})
io.xcpt_if := false.B
io.xcpt_ld := false.B
io.xcpt_st := false.B
io.debug_if := false.B
io.debug_ld := false.B
io.debug_st := false.B
(io.bpwatch zip io.bp).foldLeft((true.B, true.B, true.B)) { case ((ri, wi, xi), (bpw, bp)) =>
val en = bp.control.enabled(io.status)
val cx = bp.contextMatch(io.mcontext, io.scontext)
val r = en && bp.control.r && bp.addressMatch(io.ea) && cx
val w = en && bp.control.w && bp.addressMatch(io.ea) && cx
val x = en && bp.control.x && bp.addressMatch(io.pc) && cx
val end = !bp.control.chain
val action = bp.control.action
bpw.action := action
bpw.valid(0) := false.B
bpw.rvalid(0) := false.B
bpw.wvalid(0) := false.B
bpw.ivalid(0) := false.B
when (end && r && ri) { io.xcpt_ld := (action === 0.U); io.debug_ld := (action === 1.U); bpw.valid(0) := true.B; bpw.rvalid(0) := true.B }
when (end && w && wi) { io.xcpt_st := (action === 0.U); io.debug_st := (action === 1.U); bpw.valid(0) := true.B; bpw.wvalid(0) := true.B }
when (end && x && xi) { io.xcpt_if := (action === 0.U); io.debug_if := (action === 1.U); bpw.valid(0) := true.B; bpw.ivalid(0) := true.B }
(end || r, end || w, end || x)
}
}
| module BreakpointUnit( // @[Breakpoint.scala:79:7]
input clock, // @[Breakpoint.scala:79:7]
input reset, // @[Breakpoint.scala:79:7]
input io_status_debug, // @[Breakpoint.scala:80:14]
input io_status_cease, // @[Breakpoint.scala:80:14]
input io_status_wfi, // @[Breakpoint.scala:80:14]
input [31:0] io_status_isa, // @[Breakpoint.scala:80:14]
input [1:0] io_status_dprv, // @[Breakpoint.scala:80:14]
input io_status_dv, // @[Breakpoint.scala:80:14]
input [1:0] io_status_prv, // @[Breakpoint.scala:80:14]
input io_status_v, // @[Breakpoint.scala:80:14]
input io_status_mpv, // @[Breakpoint.scala:80:14]
input io_status_gva, // @[Breakpoint.scala:80:14]
input io_status_tsr, // @[Breakpoint.scala:80:14]
input io_status_tw, // @[Breakpoint.scala:80:14]
input io_status_tvm, // @[Breakpoint.scala:80:14]
input io_status_mxr, // @[Breakpoint.scala:80:14]
input io_status_sum, // @[Breakpoint.scala:80:14]
input io_status_mprv, // @[Breakpoint.scala:80:14]
input [1:0] io_status_fs, // @[Breakpoint.scala:80:14]
input [1:0] io_status_mpp, // @[Breakpoint.scala:80:14]
input io_status_spp, // @[Breakpoint.scala:80:14]
input io_status_mpie, // @[Breakpoint.scala:80:14]
input io_status_spie, // @[Breakpoint.scala:80:14]
input io_status_mie, // @[Breakpoint.scala:80:14]
input io_status_sie, // @[Breakpoint.scala:80:14]
input io_bp_0_control_dmode, // @[Breakpoint.scala:80:14]
input io_bp_0_control_action, // @[Breakpoint.scala:80:14]
input [1:0] io_bp_0_control_tmatch, // @[Breakpoint.scala:80:14]
input io_bp_0_control_m, // @[Breakpoint.scala:80:14]
input io_bp_0_control_s, // @[Breakpoint.scala:80:14]
input io_bp_0_control_u, // @[Breakpoint.scala:80:14]
input io_bp_0_control_x, // @[Breakpoint.scala:80:14]
input io_bp_0_control_w, // @[Breakpoint.scala:80:14]
input io_bp_0_control_r, // @[Breakpoint.scala:80:14]
input [38:0] io_bp_0_address, // @[Breakpoint.scala:80:14]
input [47:0] io_bp_0_textra_pad2, // @[Breakpoint.scala:80:14]
input io_bp_0_textra_pad1, // @[Breakpoint.scala:80:14]
input [38:0] io_pc, // @[Breakpoint.scala:80:14]
input [38:0] io_ea, // @[Breakpoint.scala:80:14]
output io_xcpt_if, // @[Breakpoint.scala:80:14]
output io_xcpt_ld, // @[Breakpoint.scala:80:14]
output io_xcpt_st, // @[Breakpoint.scala:80:14]
output io_debug_if, // @[Breakpoint.scala:80:14]
output io_debug_ld, // @[Breakpoint.scala:80:14]
output io_debug_st, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_rvalid_0, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_wvalid_0, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_ivalid_0 // @[Breakpoint.scala:80:14]
);
wire io_status_debug_0 = io_status_debug; // @[Breakpoint.scala:79:7]
wire io_status_cease_0 = io_status_cease; // @[Breakpoint.scala:79:7]
wire io_status_wfi_0 = io_status_wfi; // @[Breakpoint.scala:79:7]
wire [31:0] io_status_isa_0 = io_status_isa; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_dprv_0 = io_status_dprv; // @[Breakpoint.scala:79:7]
wire io_status_dv_0 = io_status_dv; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_prv_0 = io_status_prv; // @[Breakpoint.scala:79:7]
wire io_status_v_0 = io_status_v; // @[Breakpoint.scala:79:7]
wire io_status_mpv_0 = io_status_mpv; // @[Breakpoint.scala:79:7]
wire io_status_gva_0 = io_status_gva; // @[Breakpoint.scala:79:7]
wire io_status_tsr_0 = io_status_tsr; // @[Breakpoint.scala:79:7]
wire io_status_tw_0 = io_status_tw; // @[Breakpoint.scala:79:7]
wire io_status_tvm_0 = io_status_tvm; // @[Breakpoint.scala:79:7]
wire io_status_mxr_0 = io_status_mxr; // @[Breakpoint.scala:79:7]
wire io_status_sum_0 = io_status_sum; // @[Breakpoint.scala:79:7]
wire io_status_mprv_0 = io_status_mprv; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_fs_0 = io_status_fs; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_mpp_0 = io_status_mpp; // @[Breakpoint.scala:79:7]
wire io_status_spp_0 = io_status_spp; // @[Breakpoint.scala:79:7]
wire io_status_mpie_0 = io_status_mpie; // @[Breakpoint.scala:79:7]
wire io_status_spie_0 = io_status_spie; // @[Breakpoint.scala:79:7]
wire io_status_mie_0 = io_status_mie; // @[Breakpoint.scala:79:7]
wire io_status_sie_0 = io_status_sie; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_dmode_0 = io_bp_0_control_dmode; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_action_0 = io_bp_0_control_action; // @[Breakpoint.scala:79:7]
wire [1:0] io_bp_0_control_tmatch_0 = io_bp_0_control_tmatch; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_m_0 = io_bp_0_control_m; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_s_0 = io_bp_0_control_s; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_u_0 = io_bp_0_control_u; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_x_0 = io_bp_0_control_x; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_w_0 = io_bp_0_control_w; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_r_0 = io_bp_0_control_r; // @[Breakpoint.scala:79:7]
wire [38:0] io_bp_0_address_0 = io_bp_0_address; // @[Breakpoint.scala:79:7]
wire [47:0] io_bp_0_textra_pad2_0 = io_bp_0_textra_pad2; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_pad1_0 = io_bp_0_textra_pad1; // @[Breakpoint.scala:79:7]
wire [38:0] io_pc_0 = io_pc; // @[Breakpoint.scala:79:7]
wire [38:0] io_ea_0 = io_ea; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_sxl = 2'h2; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_uxl = 2'h2; // @[Breakpoint.scala:79:7, :80:14]
wire [39:0] io_bp_0_control_reserved = 40'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [5:0] io_bp_0_control_maskmax = 6'h4; // @[Breakpoint.scala:79:7, :80:14]
wire [3:0] io_bp_0_control_ttype = 4'h2; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_vs = 2'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_bp_0_control_zero = 2'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_xs = 2'h3; // @[Breakpoint.scala:79:7, :80:14]
wire [7:0] io_status_zero1 = 8'h0; // @[Breakpoint.scala:79:7, :80:14]
wire io_status_mbe = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_sbe = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_sd_rv32 = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_ube = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_upie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_hie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_uie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_chain = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_h = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_mselect = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_sselect = 1'h0; // @[Breakpoint.scala:79:7]
wire [22:0] io_status_zero2 = 23'h0; // @[Breakpoint.scala:79:7, :80:14]
wire io_status_sd = 1'h1; // @[Breakpoint.scala:79:7]
wire cx = 1'h1; // @[Breakpoint.scala:55:126]
wire end_0 = 1'h1; // @[Breakpoint.scala:109:15]
wire _io_debug_ld_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:84]
wire _io_debug_st_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :119:84]
wire _io_debug_if_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :120:84]
wire r; // @[Breakpoint.scala:106:58]
wire w; // @[Breakpoint.scala:107:58]
wire x; // @[Breakpoint.scala:108:58]
wire io_bpwatch_0_valid_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_rvalid_0_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_wvalid_0_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_ivalid_0_0; // @[Breakpoint.scala:79:7]
wire [2:0] io_bpwatch_0_action; // @[Breakpoint.scala:79:7]
wire io_xcpt_if_0; // @[Breakpoint.scala:79:7]
wire io_xcpt_ld_0; // @[Breakpoint.scala:79:7]
wire io_xcpt_st_0; // @[Breakpoint.scala:79:7]
wire io_debug_if_0; // @[Breakpoint.scala:79:7]
wire io_debug_ld_0; // @[Breakpoint.scala:79:7]
wire io_debug_st_0; // @[Breakpoint.scala:79:7]
wire _en_T = ~io_status_debug_0; // @[Breakpoint.scala:30:35, :79:7]
wire [1:0] en_lo = {io_bp_0_control_s_0, io_bp_0_control_u_0}; // @[Breakpoint.scala:30:56, :79:7]
wire [1:0] en_hi = {io_bp_0_control_m_0, 1'h0}; // @[Breakpoint.scala:30:56, :79:7]
wire [3:0] _en_T_1 = {en_hi, en_lo}; // @[Breakpoint.scala:30:56]
wire [3:0] _en_T_2 = _en_T_1 >> io_status_prv_0; // @[Breakpoint.scala:30:{56,68}, :79:7]
wire _en_T_3 = _en_T_2[0]; // @[Breakpoint.scala:30:68]
wire en = _en_T & _en_T_3; // @[Breakpoint.scala:30:{35,50,68}]
wire _r_T = en & io_bp_0_control_r_0; // @[Breakpoint.scala:30:50, :79:7, :106:16]
wire _r_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _w_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _x_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _GEN = io_ea_0 >= io_bp_0_address_0; // @[Breakpoint.scala:65:8, :79:7]
wire _r_T_2; // @[Breakpoint.scala:65:8]
assign _r_T_2 = _GEN; // @[Breakpoint.scala:65:8]
wire _w_T_2; // @[Breakpoint.scala:65:8]
assign _w_T_2 = _GEN; // @[Breakpoint.scala:65:8]
wire _r_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _r_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _r_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _w_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _w_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _w_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _x_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _x_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _x_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _r_T_4 = _r_T_2 ^ _r_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [38:0] _r_T_5 = ~io_ea_0; // @[Breakpoint.scala:62:6, :79:7]
wire _r_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_8 = _r_T_6 & _r_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _r_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_10 = _r_T_8 & _r_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _r_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_12 = _r_T_10 & _r_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] r_lo = {_r_T_8, _r_T_6}; // @[package.scala:45:27]
wire [1:0] r_hi = {_r_T_12, _r_T_10}; // @[package.scala:45:27]
wire [3:0] _r_T_13 = {r_hi, r_lo}; // @[package.scala:45:27]
wire [38:0] _r_T_14 = {_r_T_5[38:4], _r_T_5[3:0] | _r_T_13}; // @[package.scala:45:27]
wire [38:0] _r_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _r_T_18 = _r_T_16 & _r_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _r_T_20 = _r_T_18 & _r_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _r_T_22 = _r_T_20 & _r_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] r_lo_1 = {_r_T_18, _r_T_16}; // @[package.scala:45:27]
wire [1:0] r_hi_1 = {_r_T_22, _r_T_20}; // @[package.scala:45:27]
wire [3:0] _r_T_23 = {r_hi_1, r_lo_1}; // @[package.scala:45:27]
wire [38:0] _r_T_24 = {_r_T_15[38:4], _r_T_15[3:0] | _r_T_23}; // @[package.scala:45:27]
wire _r_T_25 = _r_T_14 == _r_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _r_T_26 = _r_T_1 ? _r_T_4 : _r_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _r_T_27 = _r_T & _r_T_26; // @[Breakpoint.scala:68:8, :106:{16,32}]
assign r = _r_T_27; // @[Breakpoint.scala:106:{32,58}]
assign io_bpwatch_0_rvalid_0_0 = r; // @[Breakpoint.scala:79:7, :106:58]
wire _w_T = en & io_bp_0_control_w_0; // @[Breakpoint.scala:30:50, :79:7, :107:16]
wire _w_T_4 = _w_T_2 ^ _w_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [38:0] _w_T_5 = ~io_ea_0; // @[Breakpoint.scala:62:6, :79:7]
wire _w_T_8 = _w_T_6 & _w_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _w_T_10 = _w_T_8 & _w_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _w_T_12 = _w_T_10 & _w_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] w_lo = {_w_T_8, _w_T_6}; // @[package.scala:45:27]
wire [1:0] w_hi = {_w_T_12, _w_T_10}; // @[package.scala:45:27]
wire [3:0] _w_T_13 = {w_hi, w_lo}; // @[package.scala:45:27]
wire [38:0] _w_T_14 = {_w_T_5[38:4], _w_T_5[3:0] | _w_T_13}; // @[package.scala:45:27]
wire [38:0] _w_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _w_T_18 = _w_T_16 & _w_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _w_T_20 = _w_T_18 & _w_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _w_T_22 = _w_T_20 & _w_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] w_lo_1 = {_w_T_18, _w_T_16}; // @[package.scala:45:27]
wire [1:0] w_hi_1 = {_w_T_22, _w_T_20}; // @[package.scala:45:27]
wire [3:0] _w_T_23 = {w_hi_1, w_lo_1}; // @[package.scala:45:27]
wire [38:0] _w_T_24 = {_w_T_15[38:4], _w_T_15[3:0] | _w_T_23}; // @[package.scala:45:27]
wire _w_T_25 = _w_T_14 == _w_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _w_T_26 = _w_T_1 ? _w_T_4 : _w_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _w_T_27 = _w_T & _w_T_26; // @[Breakpoint.scala:68:8, :107:{16,32}]
assign w = _w_T_27; // @[Breakpoint.scala:107:{32,58}]
assign io_bpwatch_0_wvalid_0_0 = w; // @[Breakpoint.scala:79:7, :107:58]
wire _x_T = en & io_bp_0_control_x_0; // @[Breakpoint.scala:30:50, :79:7, :108:16]
wire _x_T_2 = io_pc_0 >= io_bp_0_address_0; // @[Breakpoint.scala:65:8, :79:7]
wire _x_T_4 = _x_T_2 ^ _x_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [38:0] _x_T_5 = ~io_pc_0; // @[Breakpoint.scala:62:6, :79:7]
wire _x_T_8 = _x_T_6 & _x_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _x_T_10 = _x_T_8 & _x_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _x_T_12 = _x_T_10 & _x_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] x_lo = {_x_T_8, _x_T_6}; // @[package.scala:45:27]
wire [1:0] x_hi = {_x_T_12, _x_T_10}; // @[package.scala:45:27]
wire [3:0] _x_T_13 = {x_hi, x_lo}; // @[package.scala:45:27]
wire [38:0] _x_T_14 = {_x_T_5[38:4], _x_T_5[3:0] | _x_T_13}; // @[package.scala:45:27]
wire [38:0] _x_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _x_T_18 = _x_T_16 & _x_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _x_T_20 = _x_T_18 & _x_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _x_T_22 = _x_T_20 & _x_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] x_lo_1 = {_x_T_18, _x_T_16}; // @[package.scala:45:27]
wire [1:0] x_hi_1 = {_x_T_22, _x_T_20}; // @[package.scala:45:27]
wire [3:0] _x_T_23 = {x_hi_1, x_lo_1}; // @[package.scala:45:27]
wire [38:0] _x_T_24 = {_x_T_15[38:4], _x_T_15[3:0] | _x_T_23}; // @[package.scala:45:27]
wire _x_T_25 = _x_T_14 == _x_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _x_T_26 = _x_T_1 ? _x_T_4 : _x_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _x_T_27 = _x_T & _x_T_26; // @[Breakpoint.scala:68:8, :108:{16,32}]
assign x = _x_T_27; // @[Breakpoint.scala:108:{32,58}]
assign io_bpwatch_0_ivalid_0_0 = x; // @[Breakpoint.scala:79:7, :108:58]
assign io_bpwatch_0_action = {2'h0, io_bp_0_control_action_0}; // @[Breakpoint.scala:79:7, :80:14, :112:16]
wire _io_xcpt_ld_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51]
assign io_xcpt_ld_0 = r & _io_xcpt_ld_T; // @[Breakpoint.scala:79:7, :97:14, :106:58, :118:{27,40,51}]
assign io_debug_ld_0 = r & _io_debug_ld_T; // @[Breakpoint.scala:79:7, :100:15, :106:58, :118:{27,73,84}]
wire _io_xcpt_st_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51, :119:51]
assign io_xcpt_st_0 = w & _io_xcpt_st_T; // @[Breakpoint.scala:79:7, :98:14, :107:58, :119:{27,40,51}]
assign io_debug_st_0 = w & _io_debug_st_T; // @[Breakpoint.scala:79:7, :101:15, :107:58, :119:{27,73,84}]
wire _io_xcpt_if_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51, :120:51]
assign io_xcpt_if_0 = x & _io_xcpt_if_T; // @[Breakpoint.scala:79:7, :96:14, :108:58, :120:{27,40,51}]
assign io_debug_if_0 = x & _io_debug_if_T; // @[Breakpoint.scala:79:7, :99:15, :108:58, :120:{27,73,84}]
assign io_bpwatch_0_valid_0 = x | w | r; // @[Breakpoint.scala:79:7, :106:58, :107:58, :108:58, :118:27, :119:{27,107}, :120:{27,107}]
assign io_xcpt_if = io_xcpt_if_0; // @[Breakpoint.scala:79:7]
assign io_xcpt_ld = io_xcpt_ld_0; // @[Breakpoint.scala:79:7]
assign io_xcpt_st = io_xcpt_st_0; // @[Breakpoint.scala:79:7]
assign io_debug_if = io_debug_if_0; // @[Breakpoint.scala:79:7]
assign io_debug_ld = io_debug_ld_0; // @[Breakpoint.scala:79:7]
assign io_debug_st = io_debug_st_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_rvalid_0 = io_bpwatch_0_rvalid_0_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_wvalid_0 = io_bpwatch_0_wvalid_0_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_ivalid_0 = io_bpwatch_0_ivalid_0_0; // @[Breakpoint.scala:79:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
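// Editor's note (illustrative sketch, not part of the original file): lowMask
// produces a thermometer-style mask whose population of set bits tracks the input
// value; RoundAnyRawFNToRecFN uses it below to build the rounding/sticky mask from
// the adjusted exponent. The bounds used here (105, 130) are the ones implied by an
// 8-bit-exponent / 24-bit-significand output (outMinNormExp - outSigWidth - 1 and
// outMinNormExp) and are assumptions for this sketch only.
class LowMaskSketch extends RawModule {
  val exp  = IO(Input(UInt(9.W)))   // plays the role of sAdjustedExp(outExpWidth, 0)
  val mask = IO(Output(UInt(25.W))) // one mask bit per bound position: 130 - 105 = 25
  mask := lowMask(exp, 105, 130)    // more bits set as exp falls into the subnormal range
}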
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
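// Editor's note (illustrative worked example, not part of the original file): with
// the reversed-bit PriorityEncoder above, countLeadingZeros("b00010000".U(8.W))
// elaborates to 3.U -- the number of zero bits above the most significant set bit.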
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
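// Editor's note (illustrative worked example, not part of the original file):
// orReduceBy2 and orReduceBy4 compress a sticky-bit vector by OR-ing adjacent
// pairs/quads, so a 10-bit input yields 5 and 3 bits respectively; the last output
// bit absorbs any leftover high-order slice. For example,
// orReduceBy4("b0000_1000_10".U(10.W)) elaborates to "b011".U: bits [3:0] contain a
// one, bits [7:4] contain a one, and the leftover bits [9:8] are zero.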
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
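// Editor's note (illustrative sketch, not part of the original file and not an
// input to the generated Verilog below): a minimal wrapper showing how
// RoundRawFNToRecFN is typically driven. The parameter choices (8-bit exponent,
// 24-bit significand, options = 0) and the wrapper name are assumptions; the
// rounding-mode and tininess constants come from hardfloat's consts, imported at
// the top of this file.
class RoundF32Sketch extends RawModule {
  val io = IO(new Bundle {
    val in    = Input(new RawFloat(8, 26)) // sigWidth + 2, as RoundRawFNToRecFN expects
    val out   = Output(Bits(33.W))         // recoded single-precision result
    val flags = Output(Bits(5.W))          // NV, DZ slot, OF, UF, NX
  })
  val rounder = Module(new RoundRawFNToRecFN(8, 24, 0))
  rounder.io.invalidExc     := false.B              // no invalid-operation exception upstream
  rounder.io.infiniteExc    := false.B              // no infinite-result exception upstream
  rounder.io.in             := io.in
  rounder.io.roundingMode   := round_near_even      // IEEE default rounding
  rounder.io.detectTininess := tininess_afterRounding
  io.out   := rounder.io.out
  io.flags := rounder.io.exceptionFlags
}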
| module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_52( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20]
wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53]
wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38]
wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29]
wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42]
wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49]
wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50]
wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31]
wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40]
wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37]
wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21]
wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25]
wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26]
wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26]
wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26]
wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56]
wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22]
wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // @[primitives.scala:77:20, :78:22]
wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20]
wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22]
wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20]
wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20]
wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20]
wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20]
wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20]
wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20]
wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20]
wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_60 = ~_roundMask_T_59; // @[primitives.scala:73:32, :77:20]
wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}]
wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}]
wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17]
wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56]
wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22]
wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22]
wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20]
wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20]
wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22]
wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20]
wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20]
wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58]
wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24]
wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24]
wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}]
wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41]
wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}]
wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28]
wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}]
wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67]
wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49]
wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31]
wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35]
wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? _roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32]
wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30]
assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}]
assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50]
assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31]
assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61]
wire unboundedRange_roundPosBit = doShiftSigDown1 ? _unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}]
wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67]
wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27]
wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49]
wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}]
wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}]
wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57]
wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49]
wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71]
wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}]
wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30]
wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49]
wire _common_underflow_T_10 = doShiftSigDown1 ? _common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49]
wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}]
wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34]
wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38]
wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45]
wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}]
wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60]
wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27]
assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76]
assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40]
assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34]
wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22]
wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36]
wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}]
wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32]
wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45]
wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}]
wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20]
wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}]
wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22]
wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18]
wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}]
wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14]
wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16]
wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16]
wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16]
wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16]
wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22]
wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}]
wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16]
wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23]
wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}]
wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
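// Editor's note (illustrative sketch, not part of the original file): a typical use
// of ShiftRegInit is delaying a control bit by a couple of cycles with a known reset
// value. The module, signal, and register names below are assumptions for the example.
class ReqPipeSketch extends Module {
  val io = IO(new Bundle {
    val req        = Input(Bool())
    val reqDelayed = Output(Bool())
  })
  // Two RegNext stages with reset value false, suggested names req_pipe_1 and req_pipe_0.
  io.reqDelayed := ShiftRegInit(io.req, 2, false.B, Some("req_pipe"))
}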
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
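// Editor's note (illustrative sketch, not part of the original file):
// AsyncResetShiftReg is normally instantiated through its companion object so the
// generated module gets a stable, parameter-derived name. The module and signal
// names below are assumptions for the example.
class IrqPipeSketch extends Module {
  val io = IO(new Bundle {
    val irq        = Input(Bool())
    val irqDelayed = Output(Bool())
  })
  // Two async-reset pipeline stages with reset value 0; elaborates AsyncResetShiftReg_w1_d2_i0.
  io.irqDelayed := AsyncResetShiftReg(io.irq, 2, 0, Some("irq_pipe"))
}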
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
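// Illustrative usage sketch (comment only; `levelFromOtherDomain` is a hypothetical
// single-bit signal launched in another clock domain):
//   val levelSynced = AsyncResetSynchronizerShiftReg(levelFromOtherDomain, sync = 3, init = 0)
// This instantiates one 3-deep, asynchronously-reset primitive chain per input bit,
// clocked by the local (receiving) domain, so only level-stable signals should be
// passed through it.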
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
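// Illustrative usage sketch for ClockCrossingReg (comment only; `busFromSrcDomain` and
// `captureEn` are hypothetical signals): a multi-bit bus whose enable is already
// synchronized into this domain can be captured in a single stage with
//   val captured = ClockCrossingReg(busFromSrcDomain, en = captureEn, doInit = false, name = Some("cdc_capture"))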
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_256( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_472 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
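// Worked example (comment only): folding a 16-bit value into 4 bits XORs its four
// nibbles, so Fold(0x1234.U(16.W), 4, 16) reduces to 0x1 ^ 0x2 ^ 0x3 ^ 0x4 = 0x4.
// When fullLength is not a multiple of compressedLength, the final (narrower) chunk
// is XORed into the low bits of the running result.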
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
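// Worked example (comment only), assuming amt is already less than n:
//   WrapAdd(4.U, 3.U, 6) yields 1 (the sum 7 wraps past 5 back around to 1), while
//   WrapAdd(6.U, 5.U, 8) simply truncates the sum 11 to log2(8) = 3 bits, giving 3.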
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert/or/invert so that a pc wider than size(b) keeps its upper bits
    // (a plain AND with ~(b-1).U would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
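// Worked example (comment only): AlignPCToBoundary(pc, 64) clears the low six bits of pc,
// e.g. a 40-bit pc of 0x1234567878 becomes 0x1234567840. The invert/or/invert form is used
// so the narrow (b-1) literal cannot zero out pc's upper bits through width inference.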
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
 * Object to get the FP function type from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
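// Worked example (comment only): with a 4-entry request vector in which entries 1 and 3
// are asserted and head = 2.U, AgePriorityEncoder returns 3 (the first request at or after
// head); if only entry 1 were asserted it would wrap around and return 1.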
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
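// Worked example (comment only): in an 8-entry queue with head = 5.U, entry 6 was
// allocated before entry 1 (which has wrapped past the end), and indeed
// IsOlder(6.U, 1.U, 5.U) = (6 < 1) ^ (6 < 5) ^ (1 < 5) = false ^ false ^ true = true.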
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
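// Worked example (comment only): for in = "b01000100".U,
//   MaskLower(in) = 0b01111111 (everything at or below the highest set bit), and
//   MaskUpper(in) = 0b11111100 (everything at or above the lowest set bit).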
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
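// Worked example (comment only): SelectFirstN("b101101".U, 2) returns a Vec of two
// one-hot selects, sels(0) = 0b000001 and sels(1) = 0b000100, i.e. the two
// lowest-order requests; any remaining set bits are left for a later pass.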
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
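// Illustrative behavior note (comment only): a Compactor(n = 4, k = 2, gen) routes the
// first (lowest-indexed) two valid inputs of io.in onto io.out(0) and io.out(1), and
// asserts ready only to the inputs that were actually granted an output port.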
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
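// Illustrative usage sketch (comment only; `producer`/`consumer` are hypothetical
// Decoupled endpoints and `gen` is any bundle mixing in HasBoomUOP):
//   val q = Module(new BranchKillableQueue(gen, entries = 4))
//   q.io.enq <> producer
//   q.io.brupdate := io.brupdate
//   q.io.flush := io.flush
//   consumer <> q.io.deq
// Entries whose uop.br_mask matches a mispredicted branch are invalidated and skipped,
// and the default flush_fn kills every valid entry whenever io.flush is asserted.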
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
File rename-stage.scala:
//******************************************************************************
// Copyright (c) 2012 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Datapath: Rename Logic
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Supports 1-cycle and 2-cycle latencies. (aka, passthrough versus registers between ren1 and ren2).
// - ren1: read the map tables and allocate a new physical register from the freelist.
// - ren2: read the busy table for the physical operands.
//
// Ren1 data is provided as an output to be fed directly into the ROB.
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
/**
* IO bundle to interface with the Register Rename logic
*
* @param plWidth pipeline width
 * @param numPhysRegs number of physical registers
 * @param numWbPorts number of writeback ports
*/
class RenameStageIO(
val plWidth: Int,
val numPhysRegs: Int,
val numWbPorts: Int)
(implicit p: Parameters) extends BoomBundle
/**
* IO bundle to debug the rename stage
*/
class DebugRenameStageIO(val numPhysRegs: Int)(implicit p: Parameters) extends BoomBundle
{
val freelist = Bits(numPhysRegs.W)
val isprlist = Bits(numPhysRegs.W)
val busytable = UInt(numPhysRegs.W)
}
abstract class AbstractRenameStage(
plWidth: Int,
numPhysRegs: Int,
numWbPorts: Int)
(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val ren_stalls = Output(Vec(plWidth, Bool()))
val kill = Input(Bool())
val dec_fire = Input(Vec(plWidth, Bool())) // will commit state updates
val dec_uops = Input(Vec(plWidth, new MicroOp()))
// physical specifiers available AND busy/ready status available.
val ren2_mask = Vec(plWidth, Output(Bool())) // mask of valid instructions
val ren2_uops = Vec(plWidth, Output(new MicroOp()))
// branch resolution (execute)
val brupdate = Input(new BrUpdateInfo())
val dis_fire = Input(Vec(coreWidth, Bool()))
val dis_ready = Input(Bool())
// wakeup ports
val wakeups = Flipped(Vec(numWbPorts, Valid(new ExeUnitResp(xLen))))
// commit stage
val com_valids = Input(Vec(plWidth, Bool()))
val com_uops = Input(Vec(plWidth, new MicroOp()))
val rbk_valids = Input(Vec(plWidth, Bool()))
val rollback = Input(Bool())
val debug_rob_empty = Input(Bool())
val debug = Output(new DebugRenameStageIO(numPhysRegs))
})
io.ren_stalls.foreach(_ := false.B)
io.debug := DontCare
def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp
//-------------------------------------------------------------
// Pipeline State & Wires
// Stage 1
val ren1_fire = Wire(Vec(plWidth, Bool()))
val ren1_uops = Wire(Vec(plWidth, new MicroOp))
// Stage 2
val ren2_fire = io.dis_fire
val ren2_ready = io.dis_ready
val ren2_valids = Wire(Vec(plWidth, Bool()))
val ren2_uops = Wire(Vec(plWidth, new MicroOp))
val ren2_alloc_reqs = Wire(Vec(plWidth, Bool()))
//-------------------------------------------------------------
// pipeline registers
for (w <- 0 until plWidth) {
ren1_fire(w) := io.dec_fire(w)
ren1_uops(w) := io.dec_uops(w)
}
for (w <- 0 until plWidth) {
val r_valid = RegInit(false.B)
val r_uop = Reg(new MicroOp)
val next_uop = Wire(new MicroOp)
next_uop := r_uop
when (io.kill) {
r_valid := false.B
} .elsewhen (ren2_ready) {
r_valid := ren1_fire(w)
next_uop := ren1_uops(w)
} .otherwise {
r_valid := r_valid && !ren2_fire(w) // clear bit if uop gets dispatched
next_uop := r_uop
}
r_uop := GetNewUopAndBrMask(BypassAllocations(next_uop, ren2_uops, ren2_alloc_reqs), io.brupdate)
ren2_valids(w) := r_valid
ren2_uops(w) := r_uop
}
//-------------------------------------------------------------
// Outputs
io.ren2_mask := ren2_valids
}
/**
 * Rename stage that connects the map table, free list, and busy table.
* Can be used in both the FP pipeline and the normal execute pipeline.
*
* @param plWidth pipeline width
 * @param numPhysRegs number of physical registers
 * @param numWbPorts number of writeback ports
 * @param float true if this stage renames the floating-point register file
*/
class RenameStage(
plWidth: Int,
numPhysRegs: Int,
numWbPorts: Int,
float: Boolean)
(implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p)
{
val pregSz = log2Ceil(numPhysRegs)
val rtype = if (float) RT_FLT else RT_FIX
//-------------------------------------------------------------
// Helper Functions
def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = {
val bypassed_uop = Wire(new MicroOp)
bypassed_uop := uop
val bypass_hits_rs1 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs1 }
val bypass_hits_rs2 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs2 }
val bypass_hits_rs3 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs3 }
val bypass_hits_dst = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.ldst }
val bypass_sel_rs1 = PriorityEncoderOH(bypass_hits_rs1.reverse).reverse
val bypass_sel_rs2 = PriorityEncoderOH(bypass_hits_rs2.reverse).reverse
val bypass_sel_rs3 = PriorityEncoderOH(bypass_hits_rs3.reverse).reverse
val bypass_sel_dst = PriorityEncoderOH(bypass_hits_dst.reverse).reverse
val do_bypass_rs1 = bypass_hits_rs1.reduce(_||_)
val do_bypass_rs2 = bypass_hits_rs2.reduce(_||_)
val do_bypass_rs3 = bypass_hits_rs3.reduce(_||_)
val do_bypass_dst = bypass_hits_dst.reduce(_||_)
val bypass_pdsts = older_uops.map(_.pdst)
when (do_bypass_rs1) { bypassed_uop.prs1 := Mux1H(bypass_sel_rs1, bypass_pdsts) }
when (do_bypass_rs2) { bypassed_uop.prs2 := Mux1H(bypass_sel_rs2, bypass_pdsts) }
when (do_bypass_rs3) { bypassed_uop.prs3 := Mux1H(bypass_sel_rs3, bypass_pdsts) }
when (do_bypass_dst) { bypassed_uop.stale_pdst := Mux1H(bypass_sel_dst, bypass_pdsts) }
bypassed_uop.prs1_busy := uop.prs1_busy || do_bypass_rs1
bypassed_uop.prs2_busy := uop.prs2_busy || do_bypass_rs2
bypassed_uop.prs3_busy := uop.prs3_busy || do_bypass_rs3
if (!float) {
bypassed_uop.prs3 := DontCare
bypassed_uop.prs3_busy := false.B
}
bypassed_uop
}
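  // Worked example (comment only): if older_uops(1) allocates pdst 42 for ldst x7 and the
  // incoming uop reads lrs1 = x7, the bypass network rewrites the uop's prs1 to 42 and marks
  // prs1_busy, since the freshly allocated register cannot have been written back yet.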
//-------------------------------------------------------------
// Rename Structures
val maptable = Module(new RenameMapTable(
plWidth,
32,
numPhysRegs,
false,
float))
val freelist = Module(new RenameFreeList(
plWidth,
numPhysRegs,
if (float) 32 else 31))
val busytable = Module(new RenameBusyTable(
plWidth,
numPhysRegs,
numWbPorts,
false,
float))
val ren2_br_tags = Wire(Vec(plWidth, Valid(UInt(brTagSz.W))))
// Commit/Rollback
val com_valids = Wire(Vec(plWidth, Bool()))
val rbk_valids = Wire(Vec(plWidth, Bool()))
for (w <- 0 until plWidth) {
ren2_alloc_reqs(w) := ren2_uops(w).ldst_val && ren2_uops(w).dst_rtype === rtype && ren2_fire(w)
ren2_br_tags(w).valid := ren2_fire(w) && ren2_uops(w).allocate_brtag
com_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.com_valids(w)
rbk_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.rbk_valids(w)
ren2_br_tags(w).bits := ren2_uops(w).br_tag
}
//-------------------------------------------------------------
// Rename Table
// Maptable inputs.
val map_reqs = Wire(Vec(plWidth, new MapReq(lregSz)))
val remap_reqs = Wire(Vec(plWidth, new RemapReq(lregSz, pregSz)))
// Generate maptable requests.
for ((((ren1,ren2),com),w) <- (ren1_uops zip ren2_uops zip io.com_uops.reverse).zipWithIndex) {
map_reqs(w).lrs1 := ren1.lrs1
map_reqs(w).lrs2 := ren1.lrs2
map_reqs(w).lrs3 := ren1.lrs3
map_reqs(w).ldst := ren1.ldst
remap_reqs(w).ldst := Mux(io.rollback, com.ldst , ren2.ldst)
remap_reqs(w).pdst := Mux(io.rollback, com.stale_pdst, ren2.pdst)
}
ren2_alloc_reqs zip rbk_valids.reverse zip remap_reqs map {
case ((a,r),rr) => rr.valid := a || r}
// Hook up inputs.
maptable.io.map_reqs := map_reqs
maptable.io.remap_reqs := remap_reqs
maptable.io.ren_br_tags := ren2_br_tags
maptable.io.brupdate := io.brupdate
maptable.io.rollback := io.rollback
// Maptable outputs.
for ((uop, w) <- ren1_uops.zipWithIndex) {
val mappings = maptable.io.map_resps(w)
uop.prs1 := mappings.prs1
uop.prs2 := mappings.prs2
uop.prs3 := mappings.prs3 // only FP has 3rd operand
uop.stale_pdst := mappings.stale_pdst
}
//-------------------------------------------------------------
// Free List
// Freelist inputs.
freelist.io.reqs := ren2_alloc_reqs
freelist.io.dealloc_pregs zip com_valids zip rbk_valids map
{case ((d,c),r) => d.valid := c || r}
freelist.io.dealloc_pregs zip io.com_uops map
{case (d,c) => d.bits := Mux(io.rollback, c.pdst, c.stale_pdst)}
freelist.io.ren_br_tags := ren2_br_tags
freelist.io.brupdate := io.brupdate
freelist.io.debug.pipeline_empty := io.debug_rob_empty
assert (ren2_alloc_reqs zip freelist.io.alloc_pregs map {case (r,p) => !r || p.bits =/= 0.U} reduce (_&&_),
"[rename-stage] A uop is trying to allocate the zero physical register.")
// Freelist outputs.
for ((uop, w) <- ren2_uops.zipWithIndex) {
val preg = freelist.io.alloc_pregs(w).bits
uop.pdst := Mux(uop.ldst =/= 0.U || float.B, preg, 0.U)
}
//-------------------------------------------------------------
// Busy Table
busytable.io.ren_uops := ren2_uops // expects pdst to be set up.
busytable.io.rebusy_reqs := ren2_alloc_reqs
busytable.io.wb_valids := io.wakeups.map(_.valid)
busytable.io.wb_pdsts := io.wakeups.map(_.bits.uop.pdst)
assert (!(io.wakeups.map(x => x.valid && x.bits.uop.dst_rtype =/= rtype).reduce(_||_)),
"[rename] Wakeup has wrong rtype.")
for ((uop, w) <- ren2_uops.zipWithIndex) {
val busy = busytable.io.busy_resps(w)
uop.prs1_busy := uop.lrs1_rtype === rtype && busy.prs1_busy
uop.prs2_busy := uop.lrs2_rtype === rtype && busy.prs2_busy
uop.prs3_busy := uop.frs3_en && busy.prs3_busy
val valid = ren2_valids(w)
assert (!(valid && busy.prs1_busy && rtype === RT_FIX && uop.lrs1 === 0.U), "[rename] x0 is busy??")
assert (!(valid && busy.prs2_busy && rtype === RT_FIX && uop.lrs2 === 0.U), "[rename] x0 is busy??")
}
//-------------------------------------------------------------
// Outputs
for (w <- 0 until plWidth) {
val can_allocate = freelist.io.alloc_pregs(w).valid
// Push back against Decode stage if Rename1 can't proceed.
io.ren_stalls(w) := (ren2_uops(w).dst_rtype === rtype) && !can_allocate
val bypassed_uop = Wire(new MicroOp)
if (w > 0) bypassed_uop := BypassAllocations(ren2_uops(w), ren2_uops.slice(0,w), ren2_alloc_reqs.slice(0,w))
else bypassed_uop := ren2_uops(w)
io.ren2_uops(w) := GetNewUopAndBrMask(bypassed_uop, io.brupdate)
}
//-------------------------------------------------------------
// Debug signals
io.debug.freelist := freelist.io.debug.freelist
io.debug.isprlist := freelist.io.debug.isprlist
io.debug.busytable := busytable.io.debug.busytable
}
class PredRenameStage(
plWidth: Int,
numPhysRegs: Int,
numWbPorts: Int)
(implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p)
{
def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = {
uop
}
ren2_alloc_reqs := DontCare
val busy_table = RegInit(VecInit(0.U(ftqSz.W).asBools))
val to_busy = WireInit(VecInit(0.U(ftqSz.W).asBools))
val unbusy = WireInit(VecInit(0.U(ftqSz.W).asBools))
val current_ftq_idx = Reg(UInt(log2Ceil(ftqSz).W))
var next_ftq_idx = current_ftq_idx
for (w <- 0 until plWidth) {
io.ren2_uops(w) := ren2_uops(w)
val is_sfb_br = ren2_uops(w).is_sfb_br && ren2_fire(w)
val is_sfb_shadow = ren2_uops(w).is_sfb_shadow && ren2_fire(w)
val ftq_idx = ren2_uops(w).ftq_idx
when (is_sfb_br) {
io.ren2_uops(w).pdst := ftq_idx
to_busy(ftq_idx) := true.B
}
next_ftq_idx = Mux(is_sfb_br, ftq_idx, next_ftq_idx)
when (is_sfb_shadow) {
io.ren2_uops(w).ppred := next_ftq_idx
io.ren2_uops(w).ppred_busy := (busy_table(next_ftq_idx) || to_busy(next_ftq_idx)) && !unbusy(next_ftq_idx)
}
}
for (w <- 0 until numWbPorts) {
when (io.wakeups(w).valid) {
unbusy(io.wakeups(w).bits.uop.pdst) := true.B
}
}
current_ftq_idx := next_ftq_idx
busy_table := ((busy_table.asUInt | to_busy.asUInt) & ~unbusy.asUInt).asBools
}
| module RenameStage_3( // @[rename-stage.scala:160:7]
input clock, // @[rename-stage.scala:160:7]
input reset, // @[rename-stage.scala:160:7]
output io_ren_stalls_0, // @[rename-stage.scala:60:14]
output io_ren_stalls_1, // @[rename-stage.scala:60:14]
output io_ren_stalls_2, // @[rename-stage.scala:60:14]
input io_kill, // @[rename-stage.scala:60:14]
input io_dec_fire_0, // @[rename-stage.scala:60:14]
input io_dec_fire_1, // @[rename-stage.scala:60:14]
input io_dec_fire_2, // @[rename-stage.scala:60:14]
input [6:0] io_dec_uops_0_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_dec_uops_0_inst, // @[rename-stage.scala:60:14]
input [31:0] io_dec_uops_0_debug_inst, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_dec_uops_0_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_dec_uops_0_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_dec_uops_0_fu_code, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_br, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_jalr, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_jal, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_dec_uops_0_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_dec_uops_0_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_dec_uops_0_ftq_idx, // @[rename-stage.scala:60:14]
input io_dec_uops_0_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_pc_lob, // @[rename-stage.scala:60:14]
input io_dec_uops_0_taken, // @[rename-stage.scala:60:14]
input [19:0] io_dec_uops_0_imm_packed, // @[rename-stage.scala:60:14]
input io_dec_uops_0_exception, // @[rename-stage.scala:60:14]
input [63:0] io_dec_uops_0_exc_cause, // @[rename-stage.scala:60:14]
input io_dec_uops_0_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_dec_uops_0_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_mem_size, // @[rename-stage.scala:60:14]
input io_dec_uops_0_mem_signed, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_fence, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_fencei, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_amo, // @[rename-stage.scala:60:14]
input io_dec_uops_0_uses_ldq, // @[rename-stage.scala:60:14]
input io_dec_uops_0_uses_stq, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_unique, // @[rename-stage.scala:60:14]
input io_dec_uops_0_flush_on_commit, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_lrs3, // @[rename-stage.scala:60:14]
input io_dec_uops_0_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_dec_uops_0_frs3_en, // @[rename-stage.scala:60:14]
input io_dec_uops_0_fp_val, // @[rename-stage.scala:60:14]
input io_dec_uops_0_fp_single, // @[rename-stage.scala:60:14]
input io_dec_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_dec_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_dec_uops_0_bp_debug_if, // @[rename-stage.scala:60:14]
input io_dec_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_debug_fsrc, // @[rename-stage.scala:60:14]
input [6:0] io_dec_uops_1_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_dec_uops_1_inst, // @[rename-stage.scala:60:14]
input [31:0] io_dec_uops_1_debug_inst, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_dec_uops_1_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_dec_uops_1_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_dec_uops_1_fu_code, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_br, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_jalr, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_jal, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_dec_uops_1_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_dec_uops_1_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_dec_uops_1_ftq_idx, // @[rename-stage.scala:60:14]
input io_dec_uops_1_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_1_pc_lob, // @[rename-stage.scala:60:14]
input io_dec_uops_1_taken, // @[rename-stage.scala:60:14]
input [19:0] io_dec_uops_1_imm_packed, // @[rename-stage.scala:60:14]
input io_dec_uops_1_exception, // @[rename-stage.scala:60:14]
input [63:0] io_dec_uops_1_exc_cause, // @[rename-stage.scala:60:14]
input io_dec_uops_1_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_dec_uops_1_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_1_mem_size, // @[rename-stage.scala:60:14]
input io_dec_uops_1_mem_signed, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_fence, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_fencei, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_amo, // @[rename-stage.scala:60:14]
input io_dec_uops_1_uses_ldq, // @[rename-stage.scala:60:14]
input io_dec_uops_1_uses_stq, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_dec_uops_1_is_unique, // @[rename-stage.scala:60:14]
input io_dec_uops_1_flush_on_commit, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_1_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_1_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_1_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_1_lrs3, // @[rename-stage.scala:60:14]
input io_dec_uops_1_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_1_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_1_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_1_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_dec_uops_1_frs3_en, // @[rename-stage.scala:60:14]
input io_dec_uops_1_fp_val, // @[rename-stage.scala:60:14]
input io_dec_uops_1_fp_single, // @[rename-stage.scala:60:14]
input io_dec_uops_1_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_dec_uops_1_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_dec_uops_1_bp_debug_if, // @[rename-stage.scala:60:14]
input io_dec_uops_1_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_1_debug_fsrc, // @[rename-stage.scala:60:14]
input [6:0] io_dec_uops_2_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_dec_uops_2_inst, // @[rename-stage.scala:60:14]
input [31:0] io_dec_uops_2_debug_inst, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_dec_uops_2_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_dec_uops_2_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_dec_uops_2_fu_code, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_br, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_jalr, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_jal, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_dec_uops_2_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_dec_uops_2_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_dec_uops_2_ftq_idx, // @[rename-stage.scala:60:14]
input io_dec_uops_2_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_2_pc_lob, // @[rename-stage.scala:60:14]
input io_dec_uops_2_taken, // @[rename-stage.scala:60:14]
input [19:0] io_dec_uops_2_imm_packed, // @[rename-stage.scala:60:14]
input io_dec_uops_2_exception, // @[rename-stage.scala:60:14]
input [63:0] io_dec_uops_2_exc_cause, // @[rename-stage.scala:60:14]
input io_dec_uops_2_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_dec_uops_2_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_2_mem_size, // @[rename-stage.scala:60:14]
input io_dec_uops_2_mem_signed, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_fence, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_fencei, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_amo, // @[rename-stage.scala:60:14]
input io_dec_uops_2_uses_ldq, // @[rename-stage.scala:60:14]
input io_dec_uops_2_uses_stq, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_dec_uops_2_is_unique, // @[rename-stage.scala:60:14]
input io_dec_uops_2_flush_on_commit, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_2_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_2_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_2_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_2_lrs3, // @[rename-stage.scala:60:14]
input io_dec_uops_2_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_2_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_2_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_2_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_dec_uops_2_frs3_en, // @[rename-stage.scala:60:14]
input io_dec_uops_2_fp_val, // @[rename-stage.scala:60:14]
input io_dec_uops_2_fp_single, // @[rename-stage.scala:60:14]
input io_dec_uops_2_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_dec_uops_2_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_dec_uops_2_bp_debug_if, // @[rename-stage.scala:60:14]
input io_dec_uops_2_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_2_debug_fsrc, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_0_pdst, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_0_prs1, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_0_prs2, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_0_prs3, // @[rename-stage.scala:60:14]
output io_ren2_uops_0_prs1_busy, // @[rename-stage.scala:60:14]
output io_ren2_uops_0_prs2_busy, // @[rename-stage.scala:60:14]
output io_ren2_uops_0_prs3_busy, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_0_stale_pdst, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_1_pdst, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_1_prs1, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_1_prs2, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_1_prs3, // @[rename-stage.scala:60:14]
output io_ren2_uops_1_prs1_busy, // @[rename-stage.scala:60:14]
output io_ren2_uops_1_prs2_busy, // @[rename-stage.scala:60:14]
output io_ren2_uops_1_prs3_busy, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_1_stale_pdst, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_2_pdst, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_2_prs1, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_2_prs2, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_2_prs3, // @[rename-stage.scala:60:14]
output io_ren2_uops_2_prs1_busy, // @[rename-stage.scala:60:14]
output io_ren2_uops_2_prs2_busy, // @[rename-stage.scala:60:14]
output io_ren2_uops_2_prs3_busy, // @[rename-stage.scala:60:14]
output [6:0] io_ren2_uops_2_stale_pdst, // @[rename-stage.scala:60:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[rename-stage.scala:60:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[rename-stage.scala:60:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_brupdate_b2_uop_inst, // @[rename-stage.scala:60:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_br, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_jalr, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_jal, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_taken, // @[rename-stage.scala:60:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[rename-stage.scala:60:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[rename-stage.scala:60:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[rename-stage.scala:60:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[rename-stage.scala:60:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[rename-stage.scala:60:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_prs1_busy, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_prs2_busy, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_prs3_busy, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ppred_busy, // @[rename-stage.scala:60:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_exception, // @[rename-stage.scala:60:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_mem_signed, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_fence, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_fencei, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_amo, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_uses_ldq, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_uses_stq, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_unique, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_flush_on_commit, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_frs3_en, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_fp_val, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_fp_single, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_bp_debug_if, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[rename-stage.scala:60:14]
input io_brupdate_b2_valid, // @[rename-stage.scala:60:14]
input io_brupdate_b2_mispredict, // @[rename-stage.scala:60:14]
input io_brupdate_b2_taken, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_cfi_type, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_pc_sel, // @[rename-stage.scala:60:14]
input [39:0] io_brupdate_b2_jalr_target, // @[rename-stage.scala:60:14]
input [20:0] io_brupdate_b2_target_offset, // @[rename-stage.scala:60:14]
input io_dis_fire_0, // @[rename-stage.scala:60:14]
input io_dis_fire_1, // @[rename-stage.scala:60:14]
input io_dis_fire_2, // @[rename-stage.scala:60:14]
input io_dis_ready, // @[rename-stage.scala:60:14]
input io_wakeups_0_valid, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_uop_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_wakeups_0_bits_uop_inst, // @[rename-stage.scala:60:14]
input [31:0] io_wakeups_0_bits_uop_debug_inst, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_wakeups_0_bits_uop_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_0_bits_uop_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_wakeups_0_bits_uop_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_wakeups_0_bits_uop_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_0_bits_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_0_bits_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_0_bits_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_iw_state, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_br, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_jalr, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_jal, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_wakeups_0_bits_uop_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_wakeups_0_bits_uop_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_uop_ftq_idx, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_uop_pc_lob, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_taken, // @[rename-stage.scala:60:14]
input [19:0] io_wakeups_0_bits_uop_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_wakeups_0_bits_uop_csr_addr, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_uop_rob_idx, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_uop_ldq_idx, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_uop_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_rxq_idx, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_uop_pdst, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_uop_prs1, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_uop_prs2, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_uop_prs3, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_uop_ppred, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_prs1_busy, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_prs2_busy, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_prs3_busy, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_ppred_busy, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_uop_stale_pdst, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_exception, // @[rename-stage.scala:60:14]
input [63:0] io_wakeups_0_bits_uop_exc_cause, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_uop_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_mem_size, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_mem_signed, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_fence, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_fencei, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_amo, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_uses_ldq, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_uses_stq, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_is_unique, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_flush_on_commit, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_uop_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_uop_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_uop_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_uop_lrs3, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_frs3_en, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_fp_val, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_fp_single, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_bp_debug_if, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_uop_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_uop_debug_tsrc, // @[rename-stage.scala:60:14]
input [63:0] io_wakeups_0_bits_data, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_predicated, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_valid, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_fflags_bits_uop_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_wakeups_0_bits_fflags_bits_uop_inst, // @[rename-stage.scala:60:14]
input [31:0] io_wakeups_0_bits_fflags_bits_uop_debug_inst, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_wakeups_0_bits_fflags_bits_uop_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_0_bits_fflags_bits_uop_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_wakeups_0_bits_fflags_bits_uop_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_iw_state, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_br, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_jalr, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_jal, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_wakeups_0_bits_fflags_bits_uop_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_wakeups_0_bits_fflags_bits_uop_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_fflags_bits_uop_ftq_idx, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_fflags_bits_uop_pc_lob, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_taken, // @[rename-stage.scala:60:14]
input [19:0] io_wakeups_0_bits_fflags_bits_uop_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_wakeups_0_bits_fflags_bits_uop_csr_addr, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_fflags_bits_uop_rob_idx, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_fflags_bits_uop_ldq_idx, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_fflags_bits_uop_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_rxq_idx, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_fflags_bits_uop_pdst, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_fflags_bits_uop_prs1, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_fflags_bits_uop_prs2, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_fflags_bits_uop_prs3, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_fflags_bits_uop_ppred, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_prs1_busy, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_prs2_busy, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_prs3_busy, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_ppred_busy, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_0_bits_fflags_bits_uop_stale_pdst, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_exception, // @[rename-stage.scala:60:14]
input [63:0] io_wakeups_0_bits_fflags_bits_uop_exc_cause, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_fflags_bits_uop_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_mem_size, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_mem_signed, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_fence, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_fencei, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_amo, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_uses_ldq, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_uses_stq, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_is_unique, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_flush_on_commit, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_fflags_bits_uop_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs3, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_frs3_en, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_fp_val, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_fp_single, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_bp_debug_if, // @[rename-stage.scala:60:14]
input io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_tsrc, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_0_bits_fflags_bits_flags, // @[rename-stage.scala:60:14]
input io_wakeups_1_valid, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_uop_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_wakeups_1_bits_uop_inst, // @[rename-stage.scala:60:14]
input [31:0] io_wakeups_1_bits_uop_debug_inst, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_wakeups_1_bits_uop_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_1_bits_uop_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_wakeups_1_bits_uop_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_wakeups_1_bits_uop_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_1_bits_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_1_bits_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_1_bits_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_iw_state, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_br, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_jalr, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_jal, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_wakeups_1_bits_uop_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_wakeups_1_bits_uop_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_uop_ftq_idx, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_uop_pc_lob, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_taken, // @[rename-stage.scala:60:14]
input [19:0] io_wakeups_1_bits_uop_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_wakeups_1_bits_uop_csr_addr, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_uop_rob_idx, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_uop_ldq_idx, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_uop_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_rxq_idx, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_uop_pdst, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_uop_prs1, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_uop_prs2, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_uop_prs3, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_uop_ppred, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_prs1_busy, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_prs2_busy, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_prs3_busy, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_ppred_busy, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_uop_stale_pdst, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_exception, // @[rename-stage.scala:60:14]
input [63:0] io_wakeups_1_bits_uop_exc_cause, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_uop_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_mem_size, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_mem_signed, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_fence, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_fencei, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_amo, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_uses_ldq, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_uses_stq, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_is_unique, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_flush_on_commit, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_uop_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_uop_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_uop_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_uop_lrs3, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_frs3_en, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_fp_val, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_fp_single, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_bp_debug_if, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_uop_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_uop_debug_tsrc, // @[rename-stage.scala:60:14]
input [63:0] io_wakeups_1_bits_data, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_valid, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_fflags_bits_uop_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_wakeups_1_bits_fflags_bits_uop_inst, // @[rename-stage.scala:60:14]
input [31:0] io_wakeups_1_bits_fflags_bits_uop_debug_inst, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_wakeups_1_bits_fflags_bits_uop_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_1_bits_fflags_bits_uop_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_wakeups_1_bits_fflags_bits_uop_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_iw_state, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_br, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_jalr, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_jal, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_wakeups_1_bits_fflags_bits_uop_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_wakeups_1_bits_fflags_bits_uop_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_fflags_bits_uop_ftq_idx, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_fflags_bits_uop_pc_lob, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_taken, // @[rename-stage.scala:60:14]
input [19:0] io_wakeups_1_bits_fflags_bits_uop_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_wakeups_1_bits_fflags_bits_uop_csr_addr, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_fflags_bits_uop_rob_idx, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_fflags_bits_uop_ldq_idx, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_fflags_bits_uop_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_rxq_idx, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_fflags_bits_uop_pdst, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_fflags_bits_uop_prs1, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_fflags_bits_uop_prs2, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_fflags_bits_uop_prs3, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_fflags_bits_uop_ppred, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_prs1_busy, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_prs2_busy, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_prs3_busy, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_ppred_busy, // @[rename-stage.scala:60:14]
input [6:0] io_wakeups_1_bits_fflags_bits_uop_stale_pdst, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_exception, // @[rename-stage.scala:60:14]
input [63:0] io_wakeups_1_bits_fflags_bits_uop_exc_cause, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_fflags_bits_uop_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_mem_size, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_mem_signed, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_fence, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_fencei, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_amo, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_uses_ldq, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_uses_stq, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_is_unique, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_flush_on_commit, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_fflags_bits_uop_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs3, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_frs3_en, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_fp_val, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_fp_single, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_bp_debug_if, // @[rename-stage.scala:60:14]
input io_wakeups_1_bits_fflags_bits_uop_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_tsrc, // @[rename-stage.scala:60:14]
input [4:0] io_wakeups_1_bits_fflags_bits_flags, // @[rename-stage.scala:60:14]
input io_com_valids_0, // @[rename-stage.scala:60:14]
input io_com_valids_1, // @[rename-stage.scala:60:14]
input io_com_valids_2, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_0_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_com_uops_0_inst, // @[rename-stage.scala:60:14]
input [31:0] io_com_uops_0_debug_inst, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_com_uops_0_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_com_uops_0_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_0_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_com_uops_0_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_com_uops_0_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_com_uops_0_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_com_uops_0_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_iw_state, // @[rename-stage.scala:60:14]
input io_com_uops_0_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_com_uops_0_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_br, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_jalr, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_jal, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_com_uops_0_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_0_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_ftq_idx, // @[rename-stage.scala:60:14]
input io_com_uops_0_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_pc_lob, // @[rename-stage.scala:60:14]
input io_com_uops_0_taken, // @[rename-stage.scala:60:14]
input [19:0] io_com_uops_0_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_com_uops_0_csr_addr, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_0_rob_idx, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_ldq_idx, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_rxq_idx, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_0_pdst, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_0_prs1, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_0_prs2, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_0_prs3, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_ppred, // @[rename-stage.scala:60:14]
input io_com_uops_0_prs1_busy, // @[rename-stage.scala:60:14]
input io_com_uops_0_prs2_busy, // @[rename-stage.scala:60:14]
input io_com_uops_0_prs3_busy, // @[rename-stage.scala:60:14]
input io_com_uops_0_ppred_busy, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_0_stale_pdst, // @[rename-stage.scala:60:14]
input io_com_uops_0_exception, // @[rename-stage.scala:60:14]
input [63:0] io_com_uops_0_exc_cause, // @[rename-stage.scala:60:14]
input io_com_uops_0_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_mem_size, // @[rename-stage.scala:60:14]
input io_com_uops_0_mem_signed, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_fence, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_fencei, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_amo, // @[rename-stage.scala:60:14]
input io_com_uops_0_uses_ldq, // @[rename-stage.scala:60:14]
input io_com_uops_0_uses_stq, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_unique, // @[rename-stage.scala:60:14]
input io_com_uops_0_flush_on_commit, // @[rename-stage.scala:60:14]
input io_com_uops_0_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_lrs3, // @[rename-stage.scala:60:14]
input io_com_uops_0_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_com_uops_0_frs3_en, // @[rename-stage.scala:60:14]
input io_com_uops_0_fp_val, // @[rename-stage.scala:60:14]
input io_com_uops_0_fp_single, // @[rename-stage.scala:60:14]
input io_com_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_com_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_com_uops_0_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_com_uops_0_bp_debug_if, // @[rename-stage.scala:60:14]
input io_com_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_debug_tsrc, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_1_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_com_uops_1_inst, // @[rename-stage.scala:60:14]
input [31:0] io_com_uops_1_debug_inst, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_com_uops_1_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_1_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_com_uops_1_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_1_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_1_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_1_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_1_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_com_uops_1_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_1_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_com_uops_1_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_com_uops_1_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_com_uops_1_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_iw_state, // @[rename-stage.scala:60:14]
input io_com_uops_1_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_com_uops_1_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_br, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_jalr, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_jal, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_com_uops_1_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_1_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_1_ftq_idx, // @[rename-stage.scala:60:14]
input io_com_uops_1_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_1_pc_lob, // @[rename-stage.scala:60:14]
input io_com_uops_1_taken, // @[rename-stage.scala:60:14]
input [19:0] io_com_uops_1_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_com_uops_1_csr_addr, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_1_rob_idx, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_1_ldq_idx, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_1_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_rxq_idx, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_1_pdst, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_1_prs1, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_1_prs2, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_1_prs3, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_1_ppred, // @[rename-stage.scala:60:14]
input io_com_uops_1_prs1_busy, // @[rename-stage.scala:60:14]
input io_com_uops_1_prs2_busy, // @[rename-stage.scala:60:14]
input io_com_uops_1_prs3_busy, // @[rename-stage.scala:60:14]
input io_com_uops_1_ppred_busy, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_1_stale_pdst, // @[rename-stage.scala:60:14]
input io_com_uops_1_exception, // @[rename-stage.scala:60:14]
input [63:0] io_com_uops_1_exc_cause, // @[rename-stage.scala:60:14]
input io_com_uops_1_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_1_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_mem_size, // @[rename-stage.scala:60:14]
input io_com_uops_1_mem_signed, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_fence, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_fencei, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_amo, // @[rename-stage.scala:60:14]
input io_com_uops_1_uses_ldq, // @[rename-stage.scala:60:14]
input io_com_uops_1_uses_stq, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_com_uops_1_is_unique, // @[rename-stage.scala:60:14]
input io_com_uops_1_flush_on_commit, // @[rename-stage.scala:60:14]
input io_com_uops_1_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_1_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_1_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_1_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_1_lrs3, // @[rename-stage.scala:60:14]
input io_com_uops_1_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_com_uops_1_frs3_en, // @[rename-stage.scala:60:14]
input io_com_uops_1_fp_val, // @[rename-stage.scala:60:14]
input io_com_uops_1_fp_single, // @[rename-stage.scala:60:14]
input io_com_uops_1_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_com_uops_1_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_com_uops_1_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_com_uops_1_bp_debug_if, // @[rename-stage.scala:60:14]
input io_com_uops_1_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_1_debug_tsrc, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_2_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_com_uops_2_inst, // @[rename-stage.scala:60:14]
input [31:0] io_com_uops_2_debug_inst, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_com_uops_2_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_2_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_com_uops_2_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_2_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_2_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_2_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_2_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_com_uops_2_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_2_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_com_uops_2_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_com_uops_2_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_com_uops_2_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_iw_state, // @[rename-stage.scala:60:14]
input io_com_uops_2_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_com_uops_2_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_br, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_jalr, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_jal, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_sfb, // @[rename-stage.scala:60:14]
input [15:0] io_com_uops_2_br_mask, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_2_br_tag, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_2_ftq_idx, // @[rename-stage.scala:60:14]
input io_com_uops_2_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_2_pc_lob, // @[rename-stage.scala:60:14]
input io_com_uops_2_taken, // @[rename-stage.scala:60:14]
input [19:0] io_com_uops_2_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_com_uops_2_csr_addr, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_2_rob_idx, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_2_ldq_idx, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_2_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_rxq_idx, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_2_pdst, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_2_prs1, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_2_prs2, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_2_prs3, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_2_ppred, // @[rename-stage.scala:60:14]
input io_com_uops_2_prs1_busy, // @[rename-stage.scala:60:14]
input io_com_uops_2_prs2_busy, // @[rename-stage.scala:60:14]
input io_com_uops_2_prs3_busy, // @[rename-stage.scala:60:14]
input io_com_uops_2_ppred_busy, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_2_stale_pdst, // @[rename-stage.scala:60:14]
input io_com_uops_2_exception, // @[rename-stage.scala:60:14]
input [63:0] io_com_uops_2_exc_cause, // @[rename-stage.scala:60:14]
input io_com_uops_2_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_2_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_mem_size, // @[rename-stage.scala:60:14]
input io_com_uops_2_mem_signed, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_fence, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_fencei, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_amo, // @[rename-stage.scala:60:14]
input io_com_uops_2_uses_ldq, // @[rename-stage.scala:60:14]
input io_com_uops_2_uses_stq, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_com_uops_2_is_unique, // @[rename-stage.scala:60:14]
input io_com_uops_2_flush_on_commit, // @[rename-stage.scala:60:14]
input io_com_uops_2_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_2_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_2_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_2_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_2_lrs3, // @[rename-stage.scala:60:14]
input io_com_uops_2_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_com_uops_2_frs3_en, // @[rename-stage.scala:60:14]
input io_com_uops_2_fp_val, // @[rename-stage.scala:60:14]
input io_com_uops_2_fp_single, // @[rename-stage.scala:60:14]
input io_com_uops_2_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_com_uops_2_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_com_uops_2_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_com_uops_2_bp_debug_if, // @[rename-stage.scala:60:14]
input io_com_uops_2_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_2_debug_tsrc, // @[rename-stage.scala:60:14]
input io_rbk_valids_0, // @[rename-stage.scala:60:14]
input io_rbk_valids_1, // @[rename-stage.scala:60:14]
input io_rbk_valids_2, // @[rename-stage.scala:60:14]
input io_rollback, // @[rename-stage.scala:60:14]
input io_debug_rob_empty // @[rename-stage.scala:60:14]
);
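  // The wires below appear to be the FIRRTL-flattened micro-op bundle fields used inside the
  // rename stage: "bypassed_uop_*" for the intra-group bypass network (rename-stage.scala:341),
  // "r_uop_*" for the registered pipeline copies (rename-stage.scala:174), and "next_uop_*" for
  // the next-cycle micro-op selection (rename-stage.scala:123), as indicated by the source
  // locators on each declaration. Field names mirror BOOM's MicroOp bundle.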
wire [1:0] bypassed_uop_2_debug_tsrc; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_2_debug_fsrc; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_bp_xcpt_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_bp_debug_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_xcpt_ma_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_xcpt_ae_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_xcpt_pf_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_fp_single; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_fp_val; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_frs3_en; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_2_lrs2_rtype; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_2_lrs1_rtype; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_2_dst_rtype; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_ldst_val; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_2_lrs3; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_2_lrs2; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_2_lrs1; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_2_ldst; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_ldst_is_rs1; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_flush_on_commit; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_unique; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_sys_pc2epc; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_uses_stq; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_uses_ldq; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_amo; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_fencei; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_fence; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_mem_signed; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_2_mem_size; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_2_mem_cmd; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_bypassable; // @[rename-stage.scala:341:28]
wire [63:0] bypassed_uop_2_exc_cause; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_exception; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_2_stale_pdst; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_ppred_busy; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_prs3_busy; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_prs2_busy; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_prs1_busy; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_2_ppred; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_2_prs3; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_2_prs2; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_2_prs1; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_2_pdst; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_2_rxq_idx; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_2_stq_idx; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_2_ldq_idx; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_2_rob_idx; // @[rename-stage.scala:341:28]
wire [11:0] bypassed_uop_2_csr_addr; // @[rename-stage.scala:341:28]
wire [19:0] bypassed_uop_2_imm_packed; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_taken; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_2_pc_lob; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_edge_inst; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_2_ftq_idx; // @[rename-stage.scala:341:28]
wire [3:0] bypassed_uop_2_br_tag; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_sfb; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_jal; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_jalr; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_br; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_iw_p2_poisoned; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_iw_p1_poisoned; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_2_iw_state; // @[rename-stage.scala:341:28]
wire [9:0] bypassed_uop_2_fu_code; // @[rename-stage.scala:341:28]
wire [2:0] bypassed_uop_2_iq_type; // @[rename-stage.scala:341:28]
wire [39:0] bypassed_uop_2_debug_pc; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_is_rvc; // @[rename-stage.scala:341:28]
wire [31:0] bypassed_uop_2_debug_inst; // @[rename-stage.scala:341:28]
wire [31:0] bypassed_uop_2_inst; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_2_uopc; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_ctrl_is_std; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_ctrl_is_sta; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_ctrl_is_load; // @[rename-stage.scala:341:28]
wire [2:0] bypassed_uop_2_ctrl_csr_cmd; // @[rename-stage.scala:341:28]
wire bypassed_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_2_ctrl_op_fcn; // @[rename-stage.scala:341:28]
wire [2:0] bypassed_uop_2_ctrl_imm_sel; // @[rename-stage.scala:341:28]
wire [2:0] bypassed_uop_2_ctrl_op2_sel; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_2_ctrl_op1_sel; // @[rename-stage.scala:341:28]
wire [3:0] bypassed_uop_2_ctrl_br_type; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_debug_tsrc; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_debug_fsrc; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_bp_xcpt_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_bp_debug_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_xcpt_ma_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_xcpt_ae_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_xcpt_pf_if; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_fp_single; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_fp_val; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_frs3_en; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_lrs2_rtype; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_lrs1_rtype; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_dst_rtype; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_ldst_val; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_1_lrs3; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_1_lrs2; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_1_lrs1; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_1_ldst; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_ldst_is_rs1; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_flush_on_commit; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_unique; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_sys_pc2epc; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_uses_stq; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_uses_ldq; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_amo; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_fencei; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_fence; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_mem_signed; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_mem_size; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_1_mem_cmd; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_bypassable; // @[rename-stage.scala:341:28]
wire [63:0] bypassed_uop_1_exc_cause; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_exception; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_1_stale_pdst; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_ppred_busy; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_prs3_busy; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_prs2_busy; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_prs1_busy; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_1_ppred; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_1_prs3; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_1_prs2; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_1_prs1; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_1_pdst; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_rxq_idx; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_1_stq_idx; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_1_ldq_idx; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_1_rob_idx; // @[rename-stage.scala:341:28]
wire [11:0] bypassed_uop_1_csr_addr; // @[rename-stage.scala:341:28]
wire [19:0] bypassed_uop_1_imm_packed; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_taken; // @[rename-stage.scala:341:28]
wire [5:0] bypassed_uop_1_pc_lob; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_edge_inst; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_1_ftq_idx; // @[rename-stage.scala:341:28]
wire [3:0] bypassed_uop_1_br_tag; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_sfb; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_jal; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_jalr; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_br; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_iw_p2_poisoned; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_iw_p1_poisoned; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_iw_state; // @[rename-stage.scala:341:28]
wire [9:0] bypassed_uop_1_fu_code; // @[rename-stage.scala:341:28]
wire [2:0] bypassed_uop_1_iq_type; // @[rename-stage.scala:341:28]
wire [39:0] bypassed_uop_1_debug_pc; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_is_rvc; // @[rename-stage.scala:341:28]
wire [31:0] bypassed_uop_1_debug_inst; // @[rename-stage.scala:341:28]
wire [31:0] bypassed_uop_1_inst; // @[rename-stage.scala:341:28]
wire [6:0] bypassed_uop_1_uopc; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_ctrl_is_std; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_ctrl_is_sta; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_ctrl_is_load; // @[rename-stage.scala:341:28]
wire [2:0] bypassed_uop_1_ctrl_csr_cmd; // @[rename-stage.scala:341:28]
wire bypassed_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:341:28]
wire [4:0] bypassed_uop_1_ctrl_op_fcn; // @[rename-stage.scala:341:28]
wire [2:0] bypassed_uop_1_ctrl_imm_sel; // @[rename-stage.scala:341:28]
wire [2:0] bypassed_uop_1_ctrl_op2_sel; // @[rename-stage.scala:341:28]
wire [1:0] bypassed_uop_1_ctrl_op1_sel; // @[rename-stage.scala:341:28]
wire [3:0] bypassed_uop_1_ctrl_br_type; // @[rename-stage.scala:341:28]
wire [6:0] r_uop_bypassed_uop_2_stale_pdst; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_2_prs3_busy; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_2_prs2_busy; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_2_prs1_busy; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_2_prs3; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_2_prs2; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_2_prs1; // @[rename-stage.scala:174:28]
wire [1:0] next_uop_2_debug_tsrc; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_2_debug_fsrc; // @[rename-stage.scala:123:24]
wire next_uop_2_bp_xcpt_if; // @[rename-stage.scala:123:24]
wire next_uop_2_bp_debug_if; // @[rename-stage.scala:123:24]
wire next_uop_2_xcpt_ma_if; // @[rename-stage.scala:123:24]
wire next_uop_2_xcpt_ae_if; // @[rename-stage.scala:123:24]
wire next_uop_2_xcpt_pf_if; // @[rename-stage.scala:123:24]
wire next_uop_2_fp_single; // @[rename-stage.scala:123:24]
wire next_uop_2_fp_val; // @[rename-stage.scala:123:24]
wire next_uop_2_frs3_en; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_2_lrs2_rtype; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_2_lrs1_rtype; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_2_dst_rtype; // @[rename-stage.scala:123:24]
wire next_uop_2_ldst_val; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_2_lrs3; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_2_lrs2; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_2_lrs1; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_2_ldst; // @[rename-stage.scala:123:24]
wire next_uop_2_ldst_is_rs1; // @[rename-stage.scala:123:24]
wire next_uop_2_flush_on_commit; // @[rename-stage.scala:123:24]
wire next_uop_2_is_unique; // @[rename-stage.scala:123:24]
wire next_uop_2_is_sys_pc2epc; // @[rename-stage.scala:123:24]
wire next_uop_2_uses_stq; // @[rename-stage.scala:123:24]
wire next_uop_2_uses_ldq; // @[rename-stage.scala:123:24]
wire next_uop_2_is_amo; // @[rename-stage.scala:123:24]
wire next_uop_2_is_fencei; // @[rename-stage.scala:123:24]
wire next_uop_2_is_fence; // @[rename-stage.scala:123:24]
wire next_uop_2_mem_signed; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_2_mem_size; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_2_mem_cmd; // @[rename-stage.scala:123:24]
wire next_uop_2_bypassable; // @[rename-stage.scala:123:24]
wire [63:0] next_uop_2_exc_cause; // @[rename-stage.scala:123:24]
wire next_uop_2_exception; // @[rename-stage.scala:123:24]
wire next_uop_2_ppred_busy; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_2_ppred; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_2_pdst; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_2_rxq_idx; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_2_stq_idx; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_2_ldq_idx; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_2_rob_idx; // @[rename-stage.scala:123:24]
wire [11:0] next_uop_2_csr_addr; // @[rename-stage.scala:123:24]
wire [19:0] next_uop_2_imm_packed; // @[rename-stage.scala:123:24]
wire next_uop_2_taken; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_2_pc_lob; // @[rename-stage.scala:123:24]
wire next_uop_2_edge_inst; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_2_ftq_idx; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_2_br_tag; // @[rename-stage.scala:123:24]
wire [15:0] next_uop_2_br_mask; // @[rename-stage.scala:123:24]
wire next_uop_2_is_sfb; // @[rename-stage.scala:123:24]
wire next_uop_2_is_jal; // @[rename-stage.scala:123:24]
wire next_uop_2_is_jalr; // @[rename-stage.scala:123:24]
wire next_uop_2_is_br; // @[rename-stage.scala:123:24]
wire next_uop_2_iw_p2_poisoned; // @[rename-stage.scala:123:24]
wire next_uop_2_iw_p1_poisoned; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_2_iw_state; // @[rename-stage.scala:123:24]
wire [9:0] next_uop_2_fu_code; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_2_iq_type; // @[rename-stage.scala:123:24]
wire [39:0] next_uop_2_debug_pc; // @[rename-stage.scala:123:24]
wire next_uop_2_is_rvc; // @[rename-stage.scala:123:24]
wire [31:0] next_uop_2_debug_inst; // @[rename-stage.scala:123:24]
wire [31:0] next_uop_2_inst; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_2_uopc; // @[rename-stage.scala:123:24]
wire next_uop_2_ctrl_is_std; // @[rename-stage.scala:123:24]
wire next_uop_2_ctrl_is_sta; // @[rename-stage.scala:123:24]
wire next_uop_2_ctrl_is_load; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_2_ctrl_csr_cmd; // @[rename-stage.scala:123:24]
wire next_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_2_ctrl_op_fcn; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_2_ctrl_imm_sel; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_2_ctrl_op2_sel; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_2_ctrl_op1_sel; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_2_ctrl_br_type; // @[rename-stage.scala:123:24]
wire [6:0] r_uop_bypassed_uop_1_stale_pdst; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_1_prs3_busy; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_1_prs2_busy; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_1_prs1_busy; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_1_prs3; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_1_prs2; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_1_prs1; // @[rename-stage.scala:174:28]
wire [1:0] next_uop_1_debug_tsrc; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_1_debug_fsrc; // @[rename-stage.scala:123:24]
wire next_uop_1_bp_xcpt_if; // @[rename-stage.scala:123:24]
wire next_uop_1_bp_debug_if; // @[rename-stage.scala:123:24]
wire next_uop_1_xcpt_ma_if; // @[rename-stage.scala:123:24]
wire next_uop_1_xcpt_ae_if; // @[rename-stage.scala:123:24]
wire next_uop_1_xcpt_pf_if; // @[rename-stage.scala:123:24]
wire next_uop_1_fp_single; // @[rename-stage.scala:123:24]
wire next_uop_1_fp_val; // @[rename-stage.scala:123:24]
wire next_uop_1_frs3_en; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_1_lrs2_rtype; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_1_lrs1_rtype; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_1_dst_rtype; // @[rename-stage.scala:123:24]
wire next_uop_1_ldst_val; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_1_lrs3; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_1_lrs2; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_1_lrs1; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_1_ldst; // @[rename-stage.scala:123:24]
wire next_uop_1_ldst_is_rs1; // @[rename-stage.scala:123:24]
wire next_uop_1_flush_on_commit; // @[rename-stage.scala:123:24]
wire next_uop_1_is_unique; // @[rename-stage.scala:123:24]
wire next_uop_1_is_sys_pc2epc; // @[rename-stage.scala:123:24]
wire next_uop_1_uses_stq; // @[rename-stage.scala:123:24]
wire next_uop_1_uses_ldq; // @[rename-stage.scala:123:24]
wire next_uop_1_is_amo; // @[rename-stage.scala:123:24]
wire next_uop_1_is_fencei; // @[rename-stage.scala:123:24]
wire next_uop_1_is_fence; // @[rename-stage.scala:123:24]
wire next_uop_1_mem_signed; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_1_mem_size; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_1_mem_cmd; // @[rename-stage.scala:123:24]
wire next_uop_1_bypassable; // @[rename-stage.scala:123:24]
wire [63:0] next_uop_1_exc_cause; // @[rename-stage.scala:123:24]
wire next_uop_1_exception; // @[rename-stage.scala:123:24]
wire next_uop_1_ppred_busy; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_1_ppred; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_1_pdst; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_1_rxq_idx; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_1_stq_idx; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_1_ldq_idx; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_1_rob_idx; // @[rename-stage.scala:123:24]
wire [11:0] next_uop_1_csr_addr; // @[rename-stage.scala:123:24]
wire [19:0] next_uop_1_imm_packed; // @[rename-stage.scala:123:24]
wire next_uop_1_taken; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_1_pc_lob; // @[rename-stage.scala:123:24]
wire next_uop_1_edge_inst; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_1_ftq_idx; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_1_br_tag; // @[rename-stage.scala:123:24]
wire [15:0] next_uop_1_br_mask; // @[rename-stage.scala:123:24]
wire next_uop_1_is_sfb; // @[rename-stage.scala:123:24]
wire next_uop_1_is_jal; // @[rename-stage.scala:123:24]
wire next_uop_1_is_jalr; // @[rename-stage.scala:123:24]
wire next_uop_1_is_br; // @[rename-stage.scala:123:24]
wire next_uop_1_iw_p2_poisoned; // @[rename-stage.scala:123:24]
wire next_uop_1_iw_p1_poisoned; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_1_iw_state; // @[rename-stage.scala:123:24]
wire [9:0] next_uop_1_fu_code; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_1_iq_type; // @[rename-stage.scala:123:24]
wire [39:0] next_uop_1_debug_pc; // @[rename-stage.scala:123:24]
wire next_uop_1_is_rvc; // @[rename-stage.scala:123:24]
wire [31:0] next_uop_1_debug_inst; // @[rename-stage.scala:123:24]
wire [31:0] next_uop_1_inst; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_1_uopc; // @[rename-stage.scala:123:24]
wire next_uop_1_ctrl_is_std; // @[rename-stage.scala:123:24]
wire next_uop_1_ctrl_is_sta; // @[rename-stage.scala:123:24]
wire next_uop_1_ctrl_is_load; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_1_ctrl_csr_cmd; // @[rename-stage.scala:123:24]
wire next_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_1_ctrl_op_fcn; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_1_ctrl_imm_sel; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_1_ctrl_op2_sel; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_1_ctrl_op1_sel; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_1_ctrl_br_type; // @[rename-stage.scala:123:24]
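  // Bypass-updated physical operand fields of the registered uop (rename-stage.scala:174)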
wire [6:0] r_uop_bypassed_uop_stale_pdst; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_prs3_busy; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_prs2_busy; // @[rename-stage.scala:174:28]
wire r_uop_bypassed_uop_prs1_busy; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_prs3; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_prs2; // @[rename-stage.scala:174:28]
wire [6:0] r_uop_bypassed_uop_prs1; // @[rename-stage.scala:174:28]
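  // Next-cycle uop fields feeding a rename pipeline register (rename-stage.scala:123)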
wire [1:0] next_uop_debug_tsrc; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_debug_fsrc; // @[rename-stage.scala:123:24]
wire next_uop_bp_xcpt_if; // @[rename-stage.scala:123:24]
wire next_uop_bp_debug_if; // @[rename-stage.scala:123:24]
wire next_uop_xcpt_ma_if; // @[rename-stage.scala:123:24]
wire next_uop_xcpt_ae_if; // @[rename-stage.scala:123:24]
wire next_uop_xcpt_pf_if; // @[rename-stage.scala:123:24]
wire next_uop_fp_single; // @[rename-stage.scala:123:24]
wire next_uop_fp_val; // @[rename-stage.scala:123:24]
wire next_uop_frs3_en; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_lrs2_rtype; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_lrs1_rtype; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_dst_rtype; // @[rename-stage.scala:123:24]
wire next_uop_ldst_val; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_lrs3; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_lrs2; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_lrs1; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_ldst; // @[rename-stage.scala:123:24]
wire next_uop_ldst_is_rs1; // @[rename-stage.scala:123:24]
wire next_uop_flush_on_commit; // @[rename-stage.scala:123:24]
wire next_uop_is_unique; // @[rename-stage.scala:123:24]
wire next_uop_is_sys_pc2epc; // @[rename-stage.scala:123:24]
wire next_uop_uses_stq; // @[rename-stage.scala:123:24]
wire next_uop_uses_ldq; // @[rename-stage.scala:123:24]
wire next_uop_is_amo; // @[rename-stage.scala:123:24]
wire next_uop_is_fencei; // @[rename-stage.scala:123:24]
wire next_uop_is_fence; // @[rename-stage.scala:123:24]
wire next_uop_mem_signed; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_mem_size; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_mem_cmd; // @[rename-stage.scala:123:24]
wire next_uop_bypassable; // @[rename-stage.scala:123:24]
wire [63:0] next_uop_exc_cause; // @[rename-stage.scala:123:24]
wire next_uop_exception; // @[rename-stage.scala:123:24]
wire next_uop_ppred_busy; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_ppred; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_pdst; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_rxq_idx; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_stq_idx; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_ldq_idx; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_rob_idx; // @[rename-stage.scala:123:24]
wire [11:0] next_uop_csr_addr; // @[rename-stage.scala:123:24]
wire [19:0] next_uop_imm_packed; // @[rename-stage.scala:123:24]
wire next_uop_taken; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_pc_lob; // @[rename-stage.scala:123:24]
wire next_uop_edge_inst; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_ftq_idx; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_br_tag; // @[rename-stage.scala:123:24]
wire [15:0] next_uop_br_mask; // @[rename-stage.scala:123:24]
wire next_uop_is_sfb; // @[rename-stage.scala:123:24]
wire next_uop_is_jal; // @[rename-stage.scala:123:24]
wire next_uop_is_jalr; // @[rename-stage.scala:123:24]
wire next_uop_is_br; // @[rename-stage.scala:123:24]
wire next_uop_iw_p2_poisoned; // @[rename-stage.scala:123:24]
wire next_uop_iw_p1_poisoned; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_iw_state; // @[rename-stage.scala:123:24]
wire [9:0] next_uop_fu_code; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_iq_type; // @[rename-stage.scala:123:24]
wire [39:0] next_uop_debug_pc; // @[rename-stage.scala:123:24]
wire next_uop_is_rvc; // @[rename-stage.scala:123:24]
wire [31:0] next_uop_debug_inst; // @[rename-stage.scala:123:24]
wire [31:0] next_uop_inst; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_uopc; // @[rename-stage.scala:123:24]
wire next_uop_ctrl_is_std; // @[rename-stage.scala:123:24]
wire next_uop_ctrl_is_sta; // @[rename-stage.scala:123:24]
wire next_uop_ctrl_is_load; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_ctrl_csr_cmd; // @[rename-stage.scala:123:24]
wire next_uop_ctrl_fcn_dw; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_ctrl_op_fcn; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_ctrl_imm_sel; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_ctrl_op2_sel; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_ctrl_op1_sel; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_ctrl_br_type; // @[rename-stage.scala:123:24]
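  // Rename-stage-2 uop fields, slot 2 (rename-stage.scala:108)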
wire [1:0] ren2_uops_2_debug_tsrc; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_2_debug_fsrc; // @[rename-stage.scala:108:29]
wire ren2_uops_2_bp_xcpt_if; // @[rename-stage.scala:108:29]
wire ren2_uops_2_bp_debug_if; // @[rename-stage.scala:108:29]
wire ren2_uops_2_xcpt_ma_if; // @[rename-stage.scala:108:29]
wire ren2_uops_2_xcpt_ae_if; // @[rename-stage.scala:108:29]
wire ren2_uops_2_xcpt_pf_if; // @[rename-stage.scala:108:29]
wire ren2_uops_2_fp_single; // @[rename-stage.scala:108:29]
wire ren2_uops_2_fp_val; // @[rename-stage.scala:108:29]
wire ren2_uops_2_frs3_en; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_2_lrs2_rtype; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_2_lrs1_rtype; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_2_dst_rtype; // @[rename-stage.scala:108:29]
wire ren2_uops_2_ldst_val; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_2_lrs3; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_2_lrs2; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_2_lrs1; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_2_ldst; // @[rename-stage.scala:108:29]
wire ren2_uops_2_ldst_is_rs1; // @[rename-stage.scala:108:29]
wire ren2_uops_2_flush_on_commit; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_unique; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_sys_pc2epc; // @[rename-stage.scala:108:29]
wire ren2_uops_2_uses_stq; // @[rename-stage.scala:108:29]
wire ren2_uops_2_uses_ldq; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_amo; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_fencei; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_fence; // @[rename-stage.scala:108:29]
wire ren2_uops_2_mem_signed; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_2_mem_size; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_2_mem_cmd; // @[rename-stage.scala:108:29]
wire ren2_uops_2_bypassable; // @[rename-stage.scala:108:29]
wire [63:0] ren2_uops_2_exc_cause; // @[rename-stage.scala:108:29]
wire ren2_uops_2_exception; // @[rename-stage.scala:108:29]
wire ren2_uops_2_ppred_busy; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_2_ppred; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_2_pdst; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_2_rxq_idx; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_2_stq_idx; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_2_ldq_idx; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_2_rob_idx; // @[rename-stage.scala:108:29]
wire [11:0] ren2_uops_2_csr_addr; // @[rename-stage.scala:108:29]
wire [19:0] ren2_uops_2_imm_packed; // @[rename-stage.scala:108:29]
wire ren2_uops_2_taken; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_2_pc_lob; // @[rename-stage.scala:108:29]
wire ren2_uops_2_edge_inst; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_2_ftq_idx; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_2_br_tag; // @[rename-stage.scala:108:29]
wire [15:0] ren2_uops_2_br_mask; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_sfb; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_jal; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_jalr; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_br; // @[rename-stage.scala:108:29]
wire ren2_uops_2_iw_p2_poisoned; // @[rename-stage.scala:108:29]
wire ren2_uops_2_iw_p1_poisoned; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_2_iw_state; // @[rename-stage.scala:108:29]
wire [9:0] ren2_uops_2_fu_code; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_2_iq_type; // @[rename-stage.scala:108:29]
wire [39:0] ren2_uops_2_debug_pc; // @[rename-stage.scala:108:29]
wire ren2_uops_2_is_rvc; // @[rename-stage.scala:108:29]
wire [31:0] ren2_uops_2_debug_inst; // @[rename-stage.scala:108:29]
wire [31:0] ren2_uops_2_inst; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_2_uopc; // @[rename-stage.scala:108:29]
wire ren2_uops_2_ctrl_is_std; // @[rename-stage.scala:108:29]
wire ren2_uops_2_ctrl_is_sta; // @[rename-stage.scala:108:29]
wire ren2_uops_2_ctrl_is_load; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_2_ctrl_csr_cmd; // @[rename-stage.scala:108:29]
wire ren2_uops_2_ctrl_fcn_dw; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_2_ctrl_op_fcn; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_2_ctrl_imm_sel; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_2_ctrl_op2_sel; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_2_ctrl_op1_sel; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_2_ctrl_br_type; // @[rename-stage.scala:108:29]
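  // Rename-stage-2 uop fields, slot 1 (rename-stage.scala:108)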
wire [1:0] ren2_uops_1_debug_tsrc; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_1_debug_fsrc; // @[rename-stage.scala:108:29]
wire ren2_uops_1_bp_xcpt_if; // @[rename-stage.scala:108:29]
wire ren2_uops_1_bp_debug_if; // @[rename-stage.scala:108:29]
wire ren2_uops_1_xcpt_ma_if; // @[rename-stage.scala:108:29]
wire ren2_uops_1_xcpt_ae_if; // @[rename-stage.scala:108:29]
wire ren2_uops_1_xcpt_pf_if; // @[rename-stage.scala:108:29]
wire ren2_uops_1_fp_single; // @[rename-stage.scala:108:29]
wire ren2_uops_1_fp_val; // @[rename-stage.scala:108:29]
wire ren2_uops_1_frs3_en; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_1_lrs2_rtype; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_1_lrs1_rtype; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_1_dst_rtype; // @[rename-stage.scala:108:29]
wire ren2_uops_1_ldst_val; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_1_lrs3; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_1_lrs2; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_1_lrs1; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_1_ldst; // @[rename-stage.scala:108:29]
wire ren2_uops_1_ldst_is_rs1; // @[rename-stage.scala:108:29]
wire ren2_uops_1_flush_on_commit; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_unique; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_sys_pc2epc; // @[rename-stage.scala:108:29]
wire ren2_uops_1_uses_stq; // @[rename-stage.scala:108:29]
wire ren2_uops_1_uses_ldq; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_amo; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_fencei; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_fence; // @[rename-stage.scala:108:29]
wire ren2_uops_1_mem_signed; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_1_mem_size; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_1_mem_cmd; // @[rename-stage.scala:108:29]
wire ren2_uops_1_bypassable; // @[rename-stage.scala:108:29]
wire [63:0] ren2_uops_1_exc_cause; // @[rename-stage.scala:108:29]
wire ren2_uops_1_exception; // @[rename-stage.scala:108:29]
wire ren2_uops_1_ppred_busy; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_1_ppred; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_1_pdst; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_1_rxq_idx; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_1_stq_idx; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_1_ldq_idx; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_1_rob_idx; // @[rename-stage.scala:108:29]
wire [11:0] ren2_uops_1_csr_addr; // @[rename-stage.scala:108:29]
wire [19:0] ren2_uops_1_imm_packed; // @[rename-stage.scala:108:29]
wire ren2_uops_1_taken; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_1_pc_lob; // @[rename-stage.scala:108:29]
wire ren2_uops_1_edge_inst; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_1_ftq_idx; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_1_br_tag; // @[rename-stage.scala:108:29]
wire [15:0] ren2_uops_1_br_mask; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_sfb; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_jal; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_jalr; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_br; // @[rename-stage.scala:108:29]
wire ren2_uops_1_iw_p2_poisoned; // @[rename-stage.scala:108:29]
wire ren2_uops_1_iw_p1_poisoned; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_1_iw_state; // @[rename-stage.scala:108:29]
wire [9:0] ren2_uops_1_fu_code; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_1_iq_type; // @[rename-stage.scala:108:29]
wire [39:0] ren2_uops_1_debug_pc; // @[rename-stage.scala:108:29]
wire ren2_uops_1_is_rvc; // @[rename-stage.scala:108:29]
wire [31:0] ren2_uops_1_debug_inst; // @[rename-stage.scala:108:29]
wire [31:0] ren2_uops_1_inst; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_1_uopc; // @[rename-stage.scala:108:29]
wire ren2_uops_1_ctrl_is_std; // @[rename-stage.scala:108:29]
wire ren2_uops_1_ctrl_is_sta; // @[rename-stage.scala:108:29]
wire ren2_uops_1_ctrl_is_load; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_1_ctrl_csr_cmd; // @[rename-stage.scala:108:29]
wire ren2_uops_1_ctrl_fcn_dw; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_1_ctrl_op_fcn; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_1_ctrl_imm_sel; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_1_ctrl_op2_sel; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_1_ctrl_op1_sel; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_1_ctrl_br_type; // @[rename-stage.scala:108:29]
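  // Rename-stage-2 uop fields, slot 0 (rename-stage.scala:108)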
wire [1:0] ren2_uops_0_debug_tsrc; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_debug_fsrc; // @[rename-stage.scala:108:29]
wire ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_bp_debug_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_fp_single; // @[rename-stage.scala:108:29]
wire ren2_uops_0_fp_val; // @[rename-stage.scala:108:29]
wire ren2_uops_0_frs3_en; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_dst_rtype; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ldst_val; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_lrs3; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_lrs2; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_lrs1; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_ldst; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:108:29]
wire ren2_uops_0_flush_on_commit; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_unique; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:108:29]
wire ren2_uops_0_uses_stq; // @[rename-stage.scala:108:29]
wire ren2_uops_0_uses_ldq; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_amo; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_fencei; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_fence; // @[rename-stage.scala:108:29]
wire ren2_uops_0_mem_signed; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_mem_size; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_mem_cmd; // @[rename-stage.scala:108:29]
wire ren2_uops_0_bypassable; // @[rename-stage.scala:108:29]
wire [63:0] ren2_uops_0_exc_cause; // @[rename-stage.scala:108:29]
wire ren2_uops_0_exception; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_0_stale_pdst; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ppred_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_0_prs3_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_0_prs2_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_0_prs1_busy; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_ppred; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_0_prs3; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_0_prs2; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_0_prs1; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_0_pdst; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_rxq_idx; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_stq_idx; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_ldq_idx; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_0_rob_idx; // @[rename-stage.scala:108:29]
wire [11:0] ren2_uops_0_csr_addr; // @[rename-stage.scala:108:29]
wire [19:0] ren2_uops_0_imm_packed; // @[rename-stage.scala:108:29]
wire ren2_uops_0_taken; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_pc_lob; // @[rename-stage.scala:108:29]
wire ren2_uops_0_edge_inst; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_ftq_idx; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_0_br_tag; // @[rename-stage.scala:108:29]
wire [15:0] ren2_uops_0_br_mask; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_jal; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_br; // @[rename-stage.scala:108:29]
wire ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:108:29]
wire ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_iw_state; // @[rename-stage.scala:108:29]
wire [9:0] ren2_uops_0_fu_code; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_iq_type; // @[rename-stage.scala:108:29]
wire [39:0] ren2_uops_0_debug_pc; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_rvc; // @[rename-stage.scala:108:29]
wire [31:0] ren2_uops_0_debug_inst; // @[rename-stage.scala:108:29]
wire [31:0] ren2_uops_0_inst; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_0_uopc; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:108:29]
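  // Busy-table response wires: prs1/prs2/prs3 busy bits per slot (rename-stage.scala:224)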
wire _busytable_io_busy_resps_0_prs1_busy; // @[rename-stage.scala:224:25]
wire _busytable_io_busy_resps_0_prs2_busy; // @[rename-stage.scala:224:25]
wire _busytable_io_busy_resps_0_prs3_busy; // @[rename-stage.scala:224:25]
wire _busytable_io_busy_resps_1_prs1_busy; // @[rename-stage.scala:224:25]
wire _busytable_io_busy_resps_1_prs2_busy; // @[rename-stage.scala:224:25]
wire _busytable_io_busy_resps_1_prs3_busy; // @[rename-stage.scala:224:25]
wire _busytable_io_busy_resps_2_prs1_busy; // @[rename-stage.scala:224:25]
wire _busytable_io_busy_resps_2_prs2_busy; // @[rename-stage.scala:224:25]
wire _busytable_io_busy_resps_2_prs3_busy; // @[rename-stage.scala:224:25]
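  // Free-list allocation responses: valid bit plus allocated physical register per slot (rename-stage.scala:220)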
wire _freelist_io_alloc_pregs_0_valid; // @[rename-stage.scala:220:24]
wire [6:0] _freelist_io_alloc_pregs_0_bits; // @[rename-stage.scala:220:24]
wire _freelist_io_alloc_pregs_1_valid; // @[rename-stage.scala:220:24]
wire [6:0] _freelist_io_alloc_pregs_1_bits; // @[rename-stage.scala:220:24]
wire _freelist_io_alloc_pregs_2_valid; // @[rename-stage.scala:220:24]
wire [6:0] _freelist_io_alloc_pregs_2_bits; // @[rename-stage.scala:220:24]
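  // Local copies of the module's input ports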
wire io_kill_0 = io_kill; // @[rename-stage.scala:160:7]
wire io_dec_fire_0_0 = io_dec_fire_0; // @[rename-stage.scala:160:7]
wire io_dec_fire_1_0 = io_dec_fire_1; // @[rename-stage.scala:160:7]
wire io_dec_fire_2_0 = io_dec_fire_2; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_0_uopc_0 = io_dec_uops_0_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_dec_uops_0_inst_0 = io_dec_uops_0_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_dec_uops_0_debug_inst_0 = io_dec_uops_0_debug_inst; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_rvc_0 = io_dec_uops_0_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_dec_uops_0_debug_pc_0 = io_dec_uops_0_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_0_iq_type_0 = io_dec_uops_0_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_dec_uops_0_fu_code_0 = io_dec_uops_0_fu_code; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_br_0 = io_dec_uops_0_is_br; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_jalr_0 = io_dec_uops_0_is_jalr; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_jal_0 = io_dec_uops_0_is_jal; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_sfb_0 = io_dec_uops_0_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_dec_uops_0_br_mask_0 = io_dec_uops_0_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_dec_uops_0_br_tag_0 = io_dec_uops_0_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_0_ftq_idx_0 = io_dec_uops_0_ftq_idx; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_edge_inst_0 = io_dec_uops_0_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_0_pc_lob_0 = io_dec_uops_0_pc_lob; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_taken_0 = io_dec_uops_0_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_dec_uops_0_imm_packed_0 = io_dec_uops_0_imm_packed; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_exception_0 = io_dec_uops_0_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_dec_uops_0_exc_cause_0 = io_dec_uops_0_exc_cause; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_bypassable_0 = io_dec_uops_0_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_0_mem_cmd_0 = io_dec_uops_0_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_0_mem_size_0 = io_dec_uops_0_mem_size; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_mem_signed_0 = io_dec_uops_0_mem_signed; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_fence_0 = io_dec_uops_0_is_fence; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_fencei_0 = io_dec_uops_0_is_fencei; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_amo_0 = io_dec_uops_0_is_amo; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_uses_ldq_0 = io_dec_uops_0_uses_ldq; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_uses_stq_0 = io_dec_uops_0_uses_stq; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_sys_pc2epc_0 = io_dec_uops_0_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_is_unique_0 = io_dec_uops_0_is_unique; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_flush_on_commit_0 = io_dec_uops_0_flush_on_commit; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_0_ldst_0 = io_dec_uops_0_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_0_lrs1_0 = io_dec_uops_0_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_0_lrs2_0 = io_dec_uops_0_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_0_lrs3_0 = io_dec_uops_0_lrs3; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_ldst_val_0 = io_dec_uops_0_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_0_dst_rtype_0 = io_dec_uops_0_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_0_lrs1_rtype_0 = io_dec_uops_0_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_0_lrs2_rtype_0 = io_dec_uops_0_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_frs3_en_0 = io_dec_uops_0_frs3_en; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_fp_val_0 = io_dec_uops_0_fp_val; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_fp_single_0 = io_dec_uops_0_fp_single; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_xcpt_pf_if_0 = io_dec_uops_0_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_xcpt_ae_if_0 = io_dec_uops_0_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_bp_debug_if_0 = io_dec_uops_0_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_bp_xcpt_if_0 = io_dec_uops_0_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_0_debug_fsrc_0 = io_dec_uops_0_debug_fsrc; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_1_uopc_0 = io_dec_uops_1_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_dec_uops_1_inst_0 = io_dec_uops_1_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_dec_uops_1_debug_inst_0 = io_dec_uops_1_debug_inst; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_rvc_0 = io_dec_uops_1_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_dec_uops_1_debug_pc_0 = io_dec_uops_1_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_1_iq_type_0 = io_dec_uops_1_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_dec_uops_1_fu_code_0 = io_dec_uops_1_fu_code; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_br_0 = io_dec_uops_1_is_br; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_jalr_0 = io_dec_uops_1_is_jalr; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_jal_0 = io_dec_uops_1_is_jal; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_sfb_0 = io_dec_uops_1_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_dec_uops_1_br_mask_0 = io_dec_uops_1_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_dec_uops_1_br_tag_0 = io_dec_uops_1_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_1_ftq_idx_0 = io_dec_uops_1_ftq_idx; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_edge_inst_0 = io_dec_uops_1_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_1_pc_lob_0 = io_dec_uops_1_pc_lob; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_taken_0 = io_dec_uops_1_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_dec_uops_1_imm_packed_0 = io_dec_uops_1_imm_packed; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_exception_0 = io_dec_uops_1_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_dec_uops_1_exc_cause_0 = io_dec_uops_1_exc_cause; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_bypassable_0 = io_dec_uops_1_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_1_mem_cmd_0 = io_dec_uops_1_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_mem_size_0 = io_dec_uops_1_mem_size; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_mem_signed_0 = io_dec_uops_1_mem_signed; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_fence_0 = io_dec_uops_1_is_fence; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_fencei_0 = io_dec_uops_1_is_fencei; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_amo_0 = io_dec_uops_1_is_amo; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_uses_ldq_0 = io_dec_uops_1_uses_ldq; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_uses_stq_0 = io_dec_uops_1_uses_stq; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_sys_pc2epc_0 = io_dec_uops_1_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_is_unique_0 = io_dec_uops_1_is_unique; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_flush_on_commit_0 = io_dec_uops_1_flush_on_commit; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_1_ldst_0 = io_dec_uops_1_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_1_lrs1_0 = io_dec_uops_1_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_1_lrs2_0 = io_dec_uops_1_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_1_lrs3_0 = io_dec_uops_1_lrs3; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_ldst_val_0 = io_dec_uops_1_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_dst_rtype_0 = io_dec_uops_1_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_lrs1_rtype_0 = io_dec_uops_1_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_lrs2_rtype_0 = io_dec_uops_1_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_frs3_en_0 = io_dec_uops_1_frs3_en; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_fp_val_0 = io_dec_uops_1_fp_val; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_fp_single_0 = io_dec_uops_1_fp_single; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_xcpt_pf_if_0 = io_dec_uops_1_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_xcpt_ae_if_0 = io_dec_uops_1_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_bp_debug_if_0 = io_dec_uops_1_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_bp_xcpt_if_0 = io_dec_uops_1_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_debug_fsrc_0 = io_dec_uops_1_debug_fsrc; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_2_uopc_0 = io_dec_uops_2_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_dec_uops_2_inst_0 = io_dec_uops_2_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_dec_uops_2_debug_inst_0 = io_dec_uops_2_debug_inst; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_rvc_0 = io_dec_uops_2_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_dec_uops_2_debug_pc_0 = io_dec_uops_2_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_2_iq_type_0 = io_dec_uops_2_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_dec_uops_2_fu_code_0 = io_dec_uops_2_fu_code; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_br_0 = io_dec_uops_2_is_br; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_jalr_0 = io_dec_uops_2_is_jalr; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_jal_0 = io_dec_uops_2_is_jal; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_sfb_0 = io_dec_uops_2_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_dec_uops_2_br_mask_0 = io_dec_uops_2_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_dec_uops_2_br_tag_0 = io_dec_uops_2_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_2_ftq_idx_0 = io_dec_uops_2_ftq_idx; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_edge_inst_0 = io_dec_uops_2_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_2_pc_lob_0 = io_dec_uops_2_pc_lob; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_taken_0 = io_dec_uops_2_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_dec_uops_2_imm_packed_0 = io_dec_uops_2_imm_packed; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_exception_0 = io_dec_uops_2_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_dec_uops_2_exc_cause_0 = io_dec_uops_2_exc_cause; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_bypassable_0 = io_dec_uops_2_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_2_mem_cmd_0 = io_dec_uops_2_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_mem_size_0 = io_dec_uops_2_mem_size; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_mem_signed_0 = io_dec_uops_2_mem_signed; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_fence_0 = io_dec_uops_2_is_fence; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_fencei_0 = io_dec_uops_2_is_fencei; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_amo_0 = io_dec_uops_2_is_amo; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_uses_ldq_0 = io_dec_uops_2_uses_ldq; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_uses_stq_0 = io_dec_uops_2_uses_stq; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_sys_pc2epc_0 = io_dec_uops_2_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_is_unique_0 = io_dec_uops_2_is_unique; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_flush_on_commit_0 = io_dec_uops_2_flush_on_commit; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_2_ldst_0 = io_dec_uops_2_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_2_lrs1_0 = io_dec_uops_2_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_2_lrs2_0 = io_dec_uops_2_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_dec_uops_2_lrs3_0 = io_dec_uops_2_lrs3; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_ldst_val_0 = io_dec_uops_2_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_dst_rtype_0 = io_dec_uops_2_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_lrs1_rtype_0 = io_dec_uops_2_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_lrs2_rtype_0 = io_dec_uops_2_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_frs3_en_0 = io_dec_uops_2_frs3_en; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_fp_val_0 = io_dec_uops_2_fp_val; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_fp_single_0 = io_dec_uops_2_fp_single; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_xcpt_pf_if_0 = io_dec_uops_2_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_xcpt_ae_if_0 = io_dec_uops_2_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_bp_debug_if_0 = io_dec_uops_2_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_bp_xcpt_if_0 = io_dec_uops_2_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_debug_fsrc_0 = io_dec_uops_2_debug_fsrc; // @[rename-stage.scala:160:7]
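  // Branch-update (brupdate) input port copies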
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[rename-stage.scala:160:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[rename-stage.scala:160:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[rename-stage.scala:160:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[rename-stage.scala:160:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[rename-stage.scala:160:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[rename-stage.scala:160:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[rename-stage.scala:160:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[rename-stage.scala:160:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[rename-stage.scala:160:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[rename-stage.scala:160:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[rename-stage.scala:160:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[rename-stage.scala:160:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[rename-stage.scala:160:7]
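  // Dispatch fire/ready input port copies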
wire io_dis_fire_0_0 = io_dis_fire_0; // @[rename-stage.scala:160:7]
wire io_dis_fire_1_0 = io_dis_fire_1; // @[rename-stage.scala:160:7]
wire io_dis_fire_2_0 = io_dis_fire_2; // @[rename-stage.scala:160:7]
wire io_dis_ready_0 = io_dis_ready; // @[rename-stage.scala:160:7]
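  // Wakeup port 0 input copies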
wire io_wakeups_0_valid_0 = io_wakeups_0_valid; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_uop_uopc_0 = io_wakeups_0_bits_uop_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_wakeups_0_bits_uop_inst_0 = io_wakeups_0_bits_uop_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_wakeups_0_bits_uop_debug_inst_0 = io_wakeups_0_bits_uop_debug_inst; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_rvc_0 = io_wakeups_0_bits_uop_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_wakeups_0_bits_uop_debug_pc_0 = io_wakeups_0_bits_uop_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_0_bits_uop_iq_type_0 = io_wakeups_0_bits_uop_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_wakeups_0_bits_uop_fu_code_0 = io_wakeups_0_bits_uop_fu_code; // @[rename-stage.scala:160:7]
wire [3:0] io_wakeups_0_bits_uop_ctrl_br_type_0 = io_wakeups_0_bits_uop_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_ctrl_op1_sel_0 = io_wakeups_0_bits_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_op2_sel_0 = io_wakeups_0_bits_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_imm_sel_0 = io_wakeups_0_bits_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_uop_ctrl_op_fcn_0 = io_wakeups_0_bits_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_ctrl_fcn_dw_0 = io_wakeups_0_bits_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_csr_cmd_0 = io_wakeups_0_bits_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_ctrl_is_load_0 = io_wakeups_0_bits_uop_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_ctrl_is_sta_0 = io_wakeups_0_bits_uop_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_ctrl_is_std_0 = io_wakeups_0_bits_uop_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_iw_state_0 = io_wakeups_0_bits_uop_iw_state; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_iw_p1_poisoned_0 = io_wakeups_0_bits_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_iw_p2_poisoned_0 = io_wakeups_0_bits_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_br_0 = io_wakeups_0_bits_uop_is_br; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_jalr_0 = io_wakeups_0_bits_uop_is_jalr; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_jal_0 = io_wakeups_0_bits_uop_is_jal; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_sfb_0 = io_wakeups_0_bits_uop_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_wakeups_0_bits_uop_br_mask_0 = io_wakeups_0_bits_uop_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_wakeups_0_bits_uop_br_tag_0 = io_wakeups_0_bits_uop_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_uop_ftq_idx_0 = io_wakeups_0_bits_uop_ftq_idx; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_edge_inst_0 = io_wakeups_0_bits_uop_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_uop_pc_lob_0 = io_wakeups_0_bits_uop_pc_lob; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_taken_0 = io_wakeups_0_bits_uop_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_wakeups_0_bits_uop_imm_packed_0 = io_wakeups_0_bits_uop_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_wakeups_0_bits_uop_csr_addr_0 = io_wakeups_0_bits_uop_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_uop_rob_idx_0 = io_wakeups_0_bits_uop_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_uop_ldq_idx_0 = io_wakeups_0_bits_uop_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_uop_stq_idx_0 = io_wakeups_0_bits_uop_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_rxq_idx_0 = io_wakeups_0_bits_uop_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_uop_pdst_0 = io_wakeups_0_bits_uop_pdst; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_uop_prs1_0 = io_wakeups_0_bits_uop_prs1; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_uop_prs2_0 = io_wakeups_0_bits_uop_prs2; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_uop_prs3_0 = io_wakeups_0_bits_uop_prs3; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_uop_ppred_0 = io_wakeups_0_bits_uop_ppred; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_prs1_busy_0 = io_wakeups_0_bits_uop_prs1_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_prs2_busy_0 = io_wakeups_0_bits_uop_prs2_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_prs3_busy_0 = io_wakeups_0_bits_uop_prs3_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_ppred_busy_0 = io_wakeups_0_bits_uop_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_uop_stale_pdst_0 = io_wakeups_0_bits_uop_stale_pdst; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_exception_0 = io_wakeups_0_bits_uop_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_wakeups_0_bits_uop_exc_cause_0 = io_wakeups_0_bits_uop_exc_cause; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_bypassable_0 = io_wakeups_0_bits_uop_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_uop_mem_cmd_0 = io_wakeups_0_bits_uop_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_mem_size_0 = io_wakeups_0_bits_uop_mem_size; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_mem_signed_0 = io_wakeups_0_bits_uop_mem_signed; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_fence_0 = io_wakeups_0_bits_uop_is_fence; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_fencei_0 = io_wakeups_0_bits_uop_is_fencei; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_amo_0 = io_wakeups_0_bits_uop_is_amo; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_uses_ldq_0 = io_wakeups_0_bits_uop_uses_ldq; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_uses_stq_0 = io_wakeups_0_bits_uop_uses_stq; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_sys_pc2epc_0 = io_wakeups_0_bits_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_is_unique_0 = io_wakeups_0_bits_uop_is_unique; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_flush_on_commit_0 = io_wakeups_0_bits_uop_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_ldst_is_rs1_0 = io_wakeups_0_bits_uop_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_uop_ldst_0 = io_wakeups_0_bits_uop_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_uop_lrs1_0 = io_wakeups_0_bits_uop_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_uop_lrs2_0 = io_wakeups_0_bits_uop_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_uop_lrs3_0 = io_wakeups_0_bits_uop_lrs3; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_ldst_val_0 = io_wakeups_0_bits_uop_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_dst_rtype_0 = io_wakeups_0_bits_uop_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_lrs1_rtype_0 = io_wakeups_0_bits_uop_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_lrs2_rtype_0 = io_wakeups_0_bits_uop_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_frs3_en_0 = io_wakeups_0_bits_uop_frs3_en; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_fp_val_0 = io_wakeups_0_bits_uop_fp_val; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_fp_single_0 = io_wakeups_0_bits_uop_fp_single; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_xcpt_pf_if_0 = io_wakeups_0_bits_uop_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_xcpt_ae_if_0 = io_wakeups_0_bits_uop_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_xcpt_ma_if_0 = io_wakeups_0_bits_uop_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_bp_debug_if_0 = io_wakeups_0_bits_uop_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_uop_bp_xcpt_if_0 = io_wakeups_0_bits_uop_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_debug_fsrc_0 = io_wakeups_0_bits_uop_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_uop_debug_tsrc_0 = io_wakeups_0_bits_uop_debug_tsrc; // @[rename-stage.scala:160:7]
wire [63:0] io_wakeups_0_bits_data_0 = io_wakeups_0_bits_data; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_predicated_0 = io_wakeups_0_bits_predicated; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_valid_0 = io_wakeups_0_bits_fflags_valid; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_uopc_0 = io_wakeups_0_bits_fflags_bits_uop_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_wakeups_0_bits_fflags_bits_uop_inst_0 = io_wakeups_0_bits_fflags_bits_uop_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_wakeups_0_bits_fflags_bits_uop_debug_inst_0 = io_wakeups_0_bits_fflags_bits_uop_debug_inst; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_rvc_0 = io_wakeups_0_bits_fflags_bits_uop_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_wakeups_0_bits_fflags_bits_uop_debug_pc_0 = io_wakeups_0_bits_fflags_bits_uop_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_iq_type_0 = io_wakeups_0_bits_fflags_bits_uop_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_wakeups_0_bits_fflags_bits_uop_fu_code_0 = io_wakeups_0_bits_fflags_bits_uop_fu_code; // @[rename-stage.scala:160:7]
wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_iw_state_0 = io_wakeups_0_bits_fflags_bits_uop_iw_state; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned_0 = io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned_0 = io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_br_0 = io_wakeups_0_bits_fflags_bits_uop_is_br; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_jalr_0 = io_wakeups_0_bits_fflags_bits_uop_is_jalr; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_jal_0 = io_wakeups_0_bits_fflags_bits_uop_is_jal; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_sfb_0 = io_wakeups_0_bits_fflags_bits_uop_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_wakeups_0_bits_fflags_bits_uop_br_mask_0 = io_wakeups_0_bits_fflags_bits_uop_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_wakeups_0_bits_fflags_bits_uop_br_tag_0 = io_wakeups_0_bits_fflags_bits_uop_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ftq_idx_0 = io_wakeups_0_bits_fflags_bits_uop_ftq_idx; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_edge_inst_0 = io_wakeups_0_bits_fflags_bits_uop_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_pc_lob_0 = io_wakeups_0_bits_fflags_bits_uop_pc_lob; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_taken_0 = io_wakeups_0_bits_fflags_bits_uop_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_wakeups_0_bits_fflags_bits_uop_imm_packed_0 = io_wakeups_0_bits_fflags_bits_uop_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_wakeups_0_bits_fflags_bits_uop_csr_addr_0 = io_wakeups_0_bits_fflags_bits_uop_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_rob_idx_0 = io_wakeups_0_bits_fflags_bits_uop_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ldq_idx_0 = io_wakeups_0_bits_fflags_bits_uop_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_stq_idx_0 = io_wakeups_0_bits_fflags_bits_uop_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_rxq_idx_0 = io_wakeups_0_bits_fflags_bits_uop_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_pdst_0 = io_wakeups_0_bits_fflags_bits_uop_pdst; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs1_0 = io_wakeups_0_bits_fflags_bits_uop_prs1; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs2_0 = io_wakeups_0_bits_fflags_bits_uop_prs2; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs3_0 = io_wakeups_0_bits_fflags_bits_uop_prs3; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ppred_0 = io_wakeups_0_bits_fflags_bits_uop_ppred; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs1_busy_0 = io_wakeups_0_bits_fflags_bits_uop_prs1_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs2_busy_0 = io_wakeups_0_bits_fflags_bits_uop_prs2_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs3_busy_0 = io_wakeups_0_bits_fflags_bits_uop_prs3_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_ppred_busy_0 = io_wakeups_0_bits_fflags_bits_uop_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_stale_pdst_0 = io_wakeups_0_bits_fflags_bits_uop_stale_pdst; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_exception_0 = io_wakeups_0_bits_fflags_bits_uop_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_wakeups_0_bits_fflags_bits_uop_exc_cause_0 = io_wakeups_0_bits_fflags_bits_uop_exc_cause; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_bypassable_0 = io_wakeups_0_bits_fflags_bits_uop_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_mem_cmd_0 = io_wakeups_0_bits_fflags_bits_uop_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_mem_size_0 = io_wakeups_0_bits_fflags_bits_uop_mem_size; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_mem_signed_0 = io_wakeups_0_bits_fflags_bits_uop_mem_signed; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_fence_0 = io_wakeups_0_bits_fflags_bits_uop_is_fence; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_fencei_0 = io_wakeups_0_bits_fflags_bits_uop_is_fencei; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_amo_0 = io_wakeups_0_bits_fflags_bits_uop_is_amo; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_uses_ldq_0 = io_wakeups_0_bits_fflags_bits_uop_uses_ldq; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_uses_stq_0 = io_wakeups_0_bits_fflags_bits_uop_uses_stq; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc_0 = io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_unique_0 = io_wakeups_0_bits_fflags_bits_uop_is_unique; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_flush_on_commit_0 = io_wakeups_0_bits_fflags_bits_uop_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1_0 = io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_ldst_0 = io_wakeups_0_bits_fflags_bits_uop_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_0 = io_wakeups_0_bits_fflags_bits_uop_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_0 = io_wakeups_0_bits_fflags_bits_uop_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs3_0 = io_wakeups_0_bits_fflags_bits_uop_lrs3; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_ldst_val_0 = io_wakeups_0_bits_fflags_bits_uop_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_dst_rtype_0 = io_wakeups_0_bits_fflags_bits_uop_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype_0 = io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype_0 = io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_frs3_en_0 = io_wakeups_0_bits_fflags_bits_uop_frs3_en; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_fp_val_0 = io_wakeups_0_bits_fflags_bits_uop_fp_val; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_fp_single_0 = io_wakeups_0_bits_fflags_bits_uop_fp_single; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if_0 = io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if_0 = io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if_0 = io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_bp_debug_if_0 = io_wakeups_0_bits_fflags_bits_uop_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if_0 = io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_fsrc_0 = io_wakeups_0_bits_fflags_bits_uop_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_tsrc_0 = io_wakeups_0_bits_fflags_bits_uop_debug_tsrc; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_flags_0 = io_wakeups_0_bits_fflags_bits_flags; // @[rename-stage.scala:160:7]
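  // Input pass-through for write-back wakeup port 1 (valid, micro-op, data, and fflags fields)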
wire io_wakeups_1_valid_0 = io_wakeups_1_valid; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_uop_uopc_0 = io_wakeups_1_bits_uop_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_wakeups_1_bits_uop_inst_0 = io_wakeups_1_bits_uop_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_wakeups_1_bits_uop_debug_inst_0 = io_wakeups_1_bits_uop_debug_inst; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_rvc_0 = io_wakeups_1_bits_uop_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_wakeups_1_bits_uop_debug_pc_0 = io_wakeups_1_bits_uop_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_1_bits_uop_iq_type_0 = io_wakeups_1_bits_uop_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_wakeups_1_bits_uop_fu_code_0 = io_wakeups_1_bits_uop_fu_code; // @[rename-stage.scala:160:7]
wire [3:0] io_wakeups_1_bits_uop_ctrl_br_type_0 = io_wakeups_1_bits_uop_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_ctrl_op1_sel_0 = io_wakeups_1_bits_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_1_bits_uop_ctrl_op2_sel_0 = io_wakeups_1_bits_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_1_bits_uop_ctrl_imm_sel_0 = io_wakeups_1_bits_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_uop_ctrl_op_fcn_0 = io_wakeups_1_bits_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_ctrl_fcn_dw_0 = io_wakeups_1_bits_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_1_bits_uop_ctrl_csr_cmd_0 = io_wakeups_1_bits_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_ctrl_is_load_0 = io_wakeups_1_bits_uop_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_ctrl_is_sta_0 = io_wakeups_1_bits_uop_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_ctrl_is_std_0 = io_wakeups_1_bits_uop_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_iw_state_0 = io_wakeups_1_bits_uop_iw_state; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_iw_p1_poisoned_0 = io_wakeups_1_bits_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_iw_p2_poisoned_0 = io_wakeups_1_bits_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_br_0 = io_wakeups_1_bits_uop_is_br; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_jalr_0 = io_wakeups_1_bits_uop_is_jalr; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_jal_0 = io_wakeups_1_bits_uop_is_jal; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_sfb_0 = io_wakeups_1_bits_uop_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_wakeups_1_bits_uop_br_mask_0 = io_wakeups_1_bits_uop_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_wakeups_1_bits_uop_br_tag_0 = io_wakeups_1_bits_uop_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_uop_ftq_idx_0 = io_wakeups_1_bits_uop_ftq_idx; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_edge_inst_0 = io_wakeups_1_bits_uop_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_uop_pc_lob_0 = io_wakeups_1_bits_uop_pc_lob; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_taken_0 = io_wakeups_1_bits_uop_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_wakeups_1_bits_uop_imm_packed_0 = io_wakeups_1_bits_uop_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_wakeups_1_bits_uop_csr_addr_0 = io_wakeups_1_bits_uop_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_uop_rob_idx_0 = io_wakeups_1_bits_uop_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_uop_ldq_idx_0 = io_wakeups_1_bits_uop_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_uop_stq_idx_0 = io_wakeups_1_bits_uop_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_rxq_idx_0 = io_wakeups_1_bits_uop_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_uop_pdst_0 = io_wakeups_1_bits_uop_pdst; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_uop_prs1_0 = io_wakeups_1_bits_uop_prs1; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_uop_prs2_0 = io_wakeups_1_bits_uop_prs2; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_uop_prs3_0 = io_wakeups_1_bits_uop_prs3; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_uop_ppred_0 = io_wakeups_1_bits_uop_ppred; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_prs1_busy_0 = io_wakeups_1_bits_uop_prs1_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_prs2_busy_0 = io_wakeups_1_bits_uop_prs2_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_prs3_busy_0 = io_wakeups_1_bits_uop_prs3_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_ppred_busy_0 = io_wakeups_1_bits_uop_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_uop_stale_pdst_0 = io_wakeups_1_bits_uop_stale_pdst; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_exception_0 = io_wakeups_1_bits_uop_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_wakeups_1_bits_uop_exc_cause_0 = io_wakeups_1_bits_uop_exc_cause; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_bypassable_0 = io_wakeups_1_bits_uop_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_uop_mem_cmd_0 = io_wakeups_1_bits_uop_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_mem_size_0 = io_wakeups_1_bits_uop_mem_size; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_mem_signed_0 = io_wakeups_1_bits_uop_mem_signed; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_fence_0 = io_wakeups_1_bits_uop_is_fence; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_fencei_0 = io_wakeups_1_bits_uop_is_fencei; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_amo_0 = io_wakeups_1_bits_uop_is_amo; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_uses_ldq_0 = io_wakeups_1_bits_uop_uses_ldq; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_uses_stq_0 = io_wakeups_1_bits_uop_uses_stq; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_sys_pc2epc_0 = io_wakeups_1_bits_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_is_unique_0 = io_wakeups_1_bits_uop_is_unique; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_flush_on_commit_0 = io_wakeups_1_bits_uop_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_ldst_is_rs1_0 = io_wakeups_1_bits_uop_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_uop_ldst_0 = io_wakeups_1_bits_uop_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_uop_lrs1_0 = io_wakeups_1_bits_uop_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_uop_lrs2_0 = io_wakeups_1_bits_uop_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_uop_lrs3_0 = io_wakeups_1_bits_uop_lrs3; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_ldst_val_0 = io_wakeups_1_bits_uop_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_dst_rtype_0 = io_wakeups_1_bits_uop_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_lrs1_rtype_0 = io_wakeups_1_bits_uop_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_lrs2_rtype_0 = io_wakeups_1_bits_uop_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_frs3_en_0 = io_wakeups_1_bits_uop_frs3_en; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_fp_val_0 = io_wakeups_1_bits_uop_fp_val; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_fp_single_0 = io_wakeups_1_bits_uop_fp_single; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_xcpt_pf_if_0 = io_wakeups_1_bits_uop_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_xcpt_ae_if_0 = io_wakeups_1_bits_uop_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_xcpt_ma_if_0 = io_wakeups_1_bits_uop_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_bp_debug_if_0 = io_wakeups_1_bits_uop_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_uop_bp_xcpt_if_0 = io_wakeups_1_bits_uop_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_debug_fsrc_0 = io_wakeups_1_bits_uop_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_uop_debug_tsrc_0 = io_wakeups_1_bits_uop_debug_tsrc; // @[rename-stage.scala:160:7]
wire [63:0] io_wakeups_1_bits_data_0 = io_wakeups_1_bits_data; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_valid_0 = io_wakeups_1_bits_fflags_valid; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_uopc_0 = io_wakeups_1_bits_fflags_bits_uop_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_wakeups_1_bits_fflags_bits_uop_inst_0 = io_wakeups_1_bits_fflags_bits_uop_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_wakeups_1_bits_fflags_bits_uop_debug_inst_0 = io_wakeups_1_bits_fflags_bits_uop_debug_inst; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_rvc_0 = io_wakeups_1_bits_fflags_bits_uop_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_wakeups_1_bits_fflags_bits_uop_debug_pc_0 = io_wakeups_1_bits_fflags_bits_uop_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_1_bits_fflags_bits_uop_iq_type_0 = io_wakeups_1_bits_fflags_bits_uop_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_wakeups_1_bits_fflags_bits_uop_fu_code_0 = io_wakeups_1_bits_fflags_bits_uop_fu_code; // @[rename-stage.scala:160:7]
wire [3:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_br_type_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op1_sel_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op2_sel_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_imm_sel_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op_fcn_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_ctrl_fcn_dw_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_csr_cmd_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_load_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_sta_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_std_0 = io_wakeups_1_bits_fflags_bits_uop_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_iw_state_0 = io_wakeups_1_bits_fflags_bits_uop_iw_state; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_iw_p1_poisoned_0 = io_wakeups_1_bits_fflags_bits_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_iw_p2_poisoned_0 = io_wakeups_1_bits_fflags_bits_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_br_0 = io_wakeups_1_bits_fflags_bits_uop_is_br; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_jalr_0 = io_wakeups_1_bits_fflags_bits_uop_is_jalr; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_jal_0 = io_wakeups_1_bits_fflags_bits_uop_is_jal; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_sfb_0 = io_wakeups_1_bits_fflags_bits_uop_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_wakeups_1_bits_fflags_bits_uop_br_mask_0 = io_wakeups_1_bits_fflags_bits_uop_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_wakeups_1_bits_fflags_bits_uop_br_tag_0 = io_wakeups_1_bits_fflags_bits_uop_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ftq_idx_0 = io_wakeups_1_bits_fflags_bits_uop_ftq_idx; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_edge_inst_0 = io_wakeups_1_bits_fflags_bits_uop_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_pc_lob_0 = io_wakeups_1_bits_fflags_bits_uop_pc_lob; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_taken_0 = io_wakeups_1_bits_fflags_bits_uop_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_wakeups_1_bits_fflags_bits_uop_imm_packed_0 = io_wakeups_1_bits_fflags_bits_uop_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_wakeups_1_bits_fflags_bits_uop_csr_addr_0 = io_wakeups_1_bits_fflags_bits_uop_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_rob_idx_0 = io_wakeups_1_bits_fflags_bits_uop_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ldq_idx_0 = io_wakeups_1_bits_fflags_bits_uop_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_stq_idx_0 = io_wakeups_1_bits_fflags_bits_uop_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_rxq_idx_0 = io_wakeups_1_bits_fflags_bits_uop_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_pdst_0 = io_wakeups_1_bits_fflags_bits_uop_pdst; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_prs1_0 = io_wakeups_1_bits_fflags_bits_uop_prs1; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_prs2_0 = io_wakeups_1_bits_fflags_bits_uop_prs2; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_prs3_0 = io_wakeups_1_bits_fflags_bits_uop_prs3; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ppred_0 = io_wakeups_1_bits_fflags_bits_uop_ppred; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_prs1_busy_0 = io_wakeups_1_bits_fflags_bits_uop_prs1_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_prs2_busy_0 = io_wakeups_1_bits_fflags_bits_uop_prs2_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_prs3_busy_0 = io_wakeups_1_bits_fflags_bits_uop_prs3_busy; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_ppred_busy_0 = io_wakeups_1_bits_fflags_bits_uop_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_wakeups_1_bits_fflags_bits_uop_stale_pdst_0 = io_wakeups_1_bits_fflags_bits_uop_stale_pdst; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_exception_0 = io_wakeups_1_bits_fflags_bits_uop_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_wakeups_1_bits_fflags_bits_uop_exc_cause_0 = io_wakeups_1_bits_fflags_bits_uop_exc_cause; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_bypassable_0 = io_wakeups_1_bits_fflags_bits_uop_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_uop_mem_cmd_0 = io_wakeups_1_bits_fflags_bits_uop_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_mem_size_0 = io_wakeups_1_bits_fflags_bits_uop_mem_size; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_mem_signed_0 = io_wakeups_1_bits_fflags_bits_uop_mem_signed; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_fence_0 = io_wakeups_1_bits_fflags_bits_uop_is_fence; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_fencei_0 = io_wakeups_1_bits_fflags_bits_uop_is_fencei; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_amo_0 = io_wakeups_1_bits_fflags_bits_uop_is_amo; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_uses_ldq_0 = io_wakeups_1_bits_fflags_bits_uop_uses_ldq; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_uses_stq_0 = io_wakeups_1_bits_fflags_bits_uop_uses_stq; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_sys_pc2epc_0 = io_wakeups_1_bits_fflags_bits_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_is_unique_0 = io_wakeups_1_bits_fflags_bits_uop_is_unique; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_flush_on_commit_0 = io_wakeups_1_bits_fflags_bits_uop_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_ldst_is_rs1_0 = io_wakeups_1_bits_fflags_bits_uop_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_ldst_0 = io_wakeups_1_bits_fflags_bits_uop_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs1_0 = io_wakeups_1_bits_fflags_bits_uop_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs2_0 = io_wakeups_1_bits_fflags_bits_uop_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs3_0 = io_wakeups_1_bits_fflags_bits_uop_lrs3; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_ldst_val_0 = io_wakeups_1_bits_fflags_bits_uop_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_dst_rtype_0 = io_wakeups_1_bits_fflags_bits_uop_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs1_rtype_0 = io_wakeups_1_bits_fflags_bits_uop_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs2_rtype_0 = io_wakeups_1_bits_fflags_bits_uop_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_frs3_en_0 = io_wakeups_1_bits_fflags_bits_uop_frs3_en; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_fp_val_0 = io_wakeups_1_bits_fflags_bits_uop_fp_val; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_fp_single_0 = io_wakeups_1_bits_fflags_bits_uop_fp_single; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_xcpt_pf_if_0 = io_wakeups_1_bits_fflags_bits_uop_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_xcpt_ae_if_0 = io_wakeups_1_bits_fflags_bits_uop_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_xcpt_ma_if_0 = io_wakeups_1_bits_fflags_bits_uop_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_bp_debug_if_0 = io_wakeups_1_bits_fflags_bits_uop_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_fflags_bits_uop_bp_xcpt_if_0 = io_wakeups_1_bits_fflags_bits_uop_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_fsrc_0 = io_wakeups_1_bits_fflags_bits_uop_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_tsrc_0 = io_wakeups_1_bits_fflags_bits_uop_debug_tsrc; // @[rename-stage.scala:160:7]
wire [4:0] io_wakeups_1_bits_fflags_bits_flags_0 = io_wakeups_1_bits_fflags_bits_flags; // @[rename-stage.scala:160:7]
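  // Input pass-through for the commit interface: per-lane commit valids and committing micro-ops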
wire io_com_valids_0_0 = io_com_valids_0; // @[rename-stage.scala:160:7]
wire io_com_valids_1_0 = io_com_valids_1; // @[rename-stage.scala:160:7]
wire io_com_valids_2_0 = io_com_valids_2; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_0_uopc_0 = io_com_uops_0_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_com_uops_0_inst_0 = io_com_uops_0_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_com_uops_0_debug_inst_0 = io_com_uops_0_debug_inst; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_rvc_0 = io_com_uops_0_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_com_uops_0_debug_pc_0 = io_com_uops_0_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_0_iq_type_0 = io_com_uops_0_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_com_uops_0_fu_code_0 = io_com_uops_0_fu_code; // @[rename-stage.scala:160:7]
wire [3:0] io_com_uops_0_ctrl_br_type_0 = io_com_uops_0_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_ctrl_op1_sel_0 = io_com_uops_0_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_0_ctrl_op2_sel_0 = io_com_uops_0_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_0_ctrl_imm_sel_0 = io_com_uops_0_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_0_ctrl_op_fcn_0 = io_com_uops_0_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_com_uops_0_ctrl_fcn_dw_0 = io_com_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_0_ctrl_csr_cmd_0 = io_com_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_com_uops_0_ctrl_is_load_0 = io_com_uops_0_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_com_uops_0_ctrl_is_sta_0 = io_com_uops_0_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_com_uops_0_ctrl_is_std_0 = io_com_uops_0_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_iw_state_0 = io_com_uops_0_iw_state; // @[rename-stage.scala:160:7]
wire io_com_uops_0_iw_p1_poisoned_0 = io_com_uops_0_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_com_uops_0_iw_p2_poisoned_0 = io_com_uops_0_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_br_0 = io_com_uops_0_is_br; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_jalr_0 = io_com_uops_0_is_jalr; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_jal_0 = io_com_uops_0_is_jal; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_sfb_0 = io_com_uops_0_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_com_uops_0_br_mask_0 = io_com_uops_0_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_com_uops_0_br_tag_0 = io_com_uops_0_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_0_ftq_idx_0 = io_com_uops_0_ftq_idx; // @[rename-stage.scala:160:7]
wire io_com_uops_0_edge_inst_0 = io_com_uops_0_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_0_pc_lob_0 = io_com_uops_0_pc_lob; // @[rename-stage.scala:160:7]
wire io_com_uops_0_taken_0 = io_com_uops_0_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_com_uops_0_imm_packed_0 = io_com_uops_0_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_com_uops_0_csr_addr_0 = io_com_uops_0_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_0_rob_idx_0 = io_com_uops_0_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_0_ldq_idx_0 = io_com_uops_0_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_0_stq_idx_0 = io_com_uops_0_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_rxq_idx_0 = io_com_uops_0_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_0_pdst_0 = io_com_uops_0_pdst; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_0_prs1_0 = io_com_uops_0_prs1; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_0_prs2_0 = io_com_uops_0_prs2; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_0_prs3_0 = io_com_uops_0_prs3; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_0_ppred_0 = io_com_uops_0_ppred; // @[rename-stage.scala:160:7]
wire io_com_uops_0_prs1_busy_0 = io_com_uops_0_prs1_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_0_prs2_busy_0 = io_com_uops_0_prs2_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_0_prs3_busy_0 = io_com_uops_0_prs3_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_0_ppred_busy_0 = io_com_uops_0_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_0_stale_pdst_0 = io_com_uops_0_stale_pdst; // @[rename-stage.scala:160:7]
wire io_com_uops_0_exception_0 = io_com_uops_0_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_com_uops_0_exc_cause_0 = io_com_uops_0_exc_cause; // @[rename-stage.scala:160:7]
wire io_com_uops_0_bypassable_0 = io_com_uops_0_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_0_mem_cmd_0 = io_com_uops_0_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_mem_size_0 = io_com_uops_0_mem_size; // @[rename-stage.scala:160:7]
wire io_com_uops_0_mem_signed_0 = io_com_uops_0_mem_signed; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_fence_0 = io_com_uops_0_is_fence; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_fencei_0 = io_com_uops_0_is_fencei; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_amo_0 = io_com_uops_0_is_amo; // @[rename-stage.scala:160:7]
wire io_com_uops_0_uses_ldq_0 = io_com_uops_0_uses_ldq; // @[rename-stage.scala:160:7]
wire io_com_uops_0_uses_stq_0 = io_com_uops_0_uses_stq; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_sys_pc2epc_0 = io_com_uops_0_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_com_uops_0_is_unique_0 = io_com_uops_0_is_unique; // @[rename-stage.scala:160:7]
wire io_com_uops_0_flush_on_commit_0 = io_com_uops_0_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_com_uops_0_ldst_is_rs1_0 = io_com_uops_0_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_0_ldst_0 = io_com_uops_0_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_0_lrs1_0 = io_com_uops_0_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_0_lrs2_0 = io_com_uops_0_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_0_lrs3_0 = io_com_uops_0_lrs3; // @[rename-stage.scala:160:7]
wire io_com_uops_0_ldst_val_0 = io_com_uops_0_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_dst_rtype_0 = io_com_uops_0_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_lrs1_rtype_0 = io_com_uops_0_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_lrs2_rtype_0 = io_com_uops_0_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_com_uops_0_frs3_en_0 = io_com_uops_0_frs3_en; // @[rename-stage.scala:160:7]
wire io_com_uops_0_fp_val_0 = io_com_uops_0_fp_val; // @[rename-stage.scala:160:7]
wire io_com_uops_0_fp_single_0 = io_com_uops_0_fp_single; // @[rename-stage.scala:160:7]
wire io_com_uops_0_xcpt_pf_if_0 = io_com_uops_0_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_com_uops_0_xcpt_ae_if_0 = io_com_uops_0_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_com_uops_0_xcpt_ma_if_0 = io_com_uops_0_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_com_uops_0_bp_debug_if_0 = io_com_uops_0_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_com_uops_0_bp_xcpt_if_0 = io_com_uops_0_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_debug_fsrc_0 = io_com_uops_0_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_0_debug_tsrc_0 = io_com_uops_0_debug_tsrc; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_1_uopc_0 = io_com_uops_1_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_com_uops_1_inst_0 = io_com_uops_1_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_com_uops_1_debug_inst_0 = io_com_uops_1_debug_inst; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_rvc_0 = io_com_uops_1_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_com_uops_1_debug_pc_0 = io_com_uops_1_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_1_iq_type_0 = io_com_uops_1_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_com_uops_1_fu_code_0 = io_com_uops_1_fu_code; // @[rename-stage.scala:160:7]
wire [3:0] io_com_uops_1_ctrl_br_type_0 = io_com_uops_1_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_ctrl_op1_sel_0 = io_com_uops_1_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_1_ctrl_op2_sel_0 = io_com_uops_1_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_1_ctrl_imm_sel_0 = io_com_uops_1_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_1_ctrl_op_fcn_0 = io_com_uops_1_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_com_uops_1_ctrl_fcn_dw_0 = io_com_uops_1_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_1_ctrl_csr_cmd_0 = io_com_uops_1_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_com_uops_1_ctrl_is_load_0 = io_com_uops_1_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_com_uops_1_ctrl_is_sta_0 = io_com_uops_1_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_com_uops_1_ctrl_is_std_0 = io_com_uops_1_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_iw_state_0 = io_com_uops_1_iw_state; // @[rename-stage.scala:160:7]
wire io_com_uops_1_iw_p1_poisoned_0 = io_com_uops_1_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_com_uops_1_iw_p2_poisoned_0 = io_com_uops_1_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_br_0 = io_com_uops_1_is_br; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_jalr_0 = io_com_uops_1_is_jalr; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_jal_0 = io_com_uops_1_is_jal; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_sfb_0 = io_com_uops_1_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_com_uops_1_br_mask_0 = io_com_uops_1_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_com_uops_1_br_tag_0 = io_com_uops_1_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_1_ftq_idx_0 = io_com_uops_1_ftq_idx; // @[rename-stage.scala:160:7]
wire io_com_uops_1_edge_inst_0 = io_com_uops_1_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_1_pc_lob_0 = io_com_uops_1_pc_lob; // @[rename-stage.scala:160:7]
wire io_com_uops_1_taken_0 = io_com_uops_1_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_com_uops_1_imm_packed_0 = io_com_uops_1_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_com_uops_1_csr_addr_0 = io_com_uops_1_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_1_rob_idx_0 = io_com_uops_1_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_1_ldq_idx_0 = io_com_uops_1_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_1_stq_idx_0 = io_com_uops_1_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_rxq_idx_0 = io_com_uops_1_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_1_pdst_0 = io_com_uops_1_pdst; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_1_prs1_0 = io_com_uops_1_prs1; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_1_prs2_0 = io_com_uops_1_prs2; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_1_prs3_0 = io_com_uops_1_prs3; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_1_ppred_0 = io_com_uops_1_ppred; // @[rename-stage.scala:160:7]
wire io_com_uops_1_prs1_busy_0 = io_com_uops_1_prs1_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_1_prs2_busy_0 = io_com_uops_1_prs2_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_1_prs3_busy_0 = io_com_uops_1_prs3_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_1_ppred_busy_0 = io_com_uops_1_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_1_stale_pdst_0 = io_com_uops_1_stale_pdst; // @[rename-stage.scala:160:7]
wire io_com_uops_1_exception_0 = io_com_uops_1_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_com_uops_1_exc_cause_0 = io_com_uops_1_exc_cause; // @[rename-stage.scala:160:7]
wire io_com_uops_1_bypassable_0 = io_com_uops_1_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_1_mem_cmd_0 = io_com_uops_1_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_mem_size_0 = io_com_uops_1_mem_size; // @[rename-stage.scala:160:7]
wire io_com_uops_1_mem_signed_0 = io_com_uops_1_mem_signed; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_fence_0 = io_com_uops_1_is_fence; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_fencei_0 = io_com_uops_1_is_fencei; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_amo_0 = io_com_uops_1_is_amo; // @[rename-stage.scala:160:7]
wire io_com_uops_1_uses_ldq_0 = io_com_uops_1_uses_ldq; // @[rename-stage.scala:160:7]
wire io_com_uops_1_uses_stq_0 = io_com_uops_1_uses_stq; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_sys_pc2epc_0 = io_com_uops_1_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_com_uops_1_is_unique_0 = io_com_uops_1_is_unique; // @[rename-stage.scala:160:7]
wire io_com_uops_1_flush_on_commit_0 = io_com_uops_1_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_com_uops_1_ldst_is_rs1_0 = io_com_uops_1_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_1_ldst_0 = io_com_uops_1_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_1_lrs1_0 = io_com_uops_1_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_1_lrs2_0 = io_com_uops_1_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_1_lrs3_0 = io_com_uops_1_lrs3; // @[rename-stage.scala:160:7]
wire io_com_uops_1_ldst_val_0 = io_com_uops_1_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_dst_rtype_0 = io_com_uops_1_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_lrs1_rtype_0 = io_com_uops_1_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_lrs2_rtype_0 = io_com_uops_1_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_com_uops_1_frs3_en_0 = io_com_uops_1_frs3_en; // @[rename-stage.scala:160:7]
wire io_com_uops_1_fp_val_0 = io_com_uops_1_fp_val; // @[rename-stage.scala:160:7]
wire io_com_uops_1_fp_single_0 = io_com_uops_1_fp_single; // @[rename-stage.scala:160:7]
wire io_com_uops_1_xcpt_pf_if_0 = io_com_uops_1_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_com_uops_1_xcpt_ae_if_0 = io_com_uops_1_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_com_uops_1_xcpt_ma_if_0 = io_com_uops_1_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_com_uops_1_bp_debug_if_0 = io_com_uops_1_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_com_uops_1_bp_xcpt_if_0 = io_com_uops_1_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_debug_fsrc_0 = io_com_uops_1_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_1_debug_tsrc_0 = io_com_uops_1_debug_tsrc; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_2_uopc_0 = io_com_uops_2_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_com_uops_2_inst_0 = io_com_uops_2_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_com_uops_2_debug_inst_0 = io_com_uops_2_debug_inst; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_rvc_0 = io_com_uops_2_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_com_uops_2_debug_pc_0 = io_com_uops_2_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_2_iq_type_0 = io_com_uops_2_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_com_uops_2_fu_code_0 = io_com_uops_2_fu_code; // @[rename-stage.scala:160:7]
wire [3:0] io_com_uops_2_ctrl_br_type_0 = io_com_uops_2_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_ctrl_op1_sel_0 = io_com_uops_2_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_2_ctrl_op2_sel_0 = io_com_uops_2_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_2_ctrl_imm_sel_0 = io_com_uops_2_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_2_ctrl_op_fcn_0 = io_com_uops_2_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_com_uops_2_ctrl_fcn_dw_0 = io_com_uops_2_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_com_uops_2_ctrl_csr_cmd_0 = io_com_uops_2_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_com_uops_2_ctrl_is_load_0 = io_com_uops_2_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_com_uops_2_ctrl_is_sta_0 = io_com_uops_2_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_com_uops_2_ctrl_is_std_0 = io_com_uops_2_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_iw_state_0 = io_com_uops_2_iw_state; // @[rename-stage.scala:160:7]
wire io_com_uops_2_iw_p1_poisoned_0 = io_com_uops_2_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_com_uops_2_iw_p2_poisoned_0 = io_com_uops_2_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_br_0 = io_com_uops_2_is_br; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_jalr_0 = io_com_uops_2_is_jalr; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_jal_0 = io_com_uops_2_is_jal; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_sfb_0 = io_com_uops_2_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_com_uops_2_br_mask_0 = io_com_uops_2_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_com_uops_2_br_tag_0 = io_com_uops_2_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_2_ftq_idx_0 = io_com_uops_2_ftq_idx; // @[rename-stage.scala:160:7]
wire io_com_uops_2_edge_inst_0 = io_com_uops_2_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_2_pc_lob_0 = io_com_uops_2_pc_lob; // @[rename-stage.scala:160:7]
wire io_com_uops_2_taken_0 = io_com_uops_2_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_com_uops_2_imm_packed_0 = io_com_uops_2_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_com_uops_2_csr_addr_0 = io_com_uops_2_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_2_rob_idx_0 = io_com_uops_2_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_2_ldq_idx_0 = io_com_uops_2_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_2_stq_idx_0 = io_com_uops_2_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_rxq_idx_0 = io_com_uops_2_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_2_pdst_0 = io_com_uops_2_pdst; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_2_prs1_0 = io_com_uops_2_prs1; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_2_prs2_0 = io_com_uops_2_prs2; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_2_prs3_0 = io_com_uops_2_prs3; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_2_ppred_0 = io_com_uops_2_ppred; // @[rename-stage.scala:160:7]
wire io_com_uops_2_prs1_busy_0 = io_com_uops_2_prs1_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_2_prs2_busy_0 = io_com_uops_2_prs2_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_2_prs3_busy_0 = io_com_uops_2_prs3_busy; // @[rename-stage.scala:160:7]
wire io_com_uops_2_ppred_busy_0 = io_com_uops_2_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_com_uops_2_stale_pdst_0 = io_com_uops_2_stale_pdst; // @[rename-stage.scala:160:7]
wire io_com_uops_2_exception_0 = io_com_uops_2_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_com_uops_2_exc_cause_0 = io_com_uops_2_exc_cause; // @[rename-stage.scala:160:7]
wire io_com_uops_2_bypassable_0 = io_com_uops_2_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_com_uops_2_mem_cmd_0 = io_com_uops_2_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_mem_size_0 = io_com_uops_2_mem_size; // @[rename-stage.scala:160:7]
wire io_com_uops_2_mem_signed_0 = io_com_uops_2_mem_signed; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_fence_0 = io_com_uops_2_is_fence; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_fencei_0 = io_com_uops_2_is_fencei; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_amo_0 = io_com_uops_2_is_amo; // @[rename-stage.scala:160:7]
wire io_com_uops_2_uses_ldq_0 = io_com_uops_2_uses_ldq; // @[rename-stage.scala:160:7]
wire io_com_uops_2_uses_stq_0 = io_com_uops_2_uses_stq; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_sys_pc2epc_0 = io_com_uops_2_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_com_uops_2_is_unique_0 = io_com_uops_2_is_unique; // @[rename-stage.scala:160:7]
wire io_com_uops_2_flush_on_commit_0 = io_com_uops_2_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_com_uops_2_ldst_is_rs1_0 = io_com_uops_2_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_2_ldst_0 = io_com_uops_2_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_2_lrs1_0 = io_com_uops_2_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_2_lrs2_0 = io_com_uops_2_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_com_uops_2_lrs3_0 = io_com_uops_2_lrs3; // @[rename-stage.scala:160:7]
wire io_com_uops_2_ldst_val_0 = io_com_uops_2_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_dst_rtype_0 = io_com_uops_2_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_lrs1_rtype_0 = io_com_uops_2_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_lrs2_rtype_0 = io_com_uops_2_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_com_uops_2_frs3_en_0 = io_com_uops_2_frs3_en; // @[rename-stage.scala:160:7]
wire io_com_uops_2_fp_val_0 = io_com_uops_2_fp_val; // @[rename-stage.scala:160:7]
wire io_com_uops_2_fp_single_0 = io_com_uops_2_fp_single; // @[rename-stage.scala:160:7]
wire io_com_uops_2_xcpt_pf_if_0 = io_com_uops_2_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_com_uops_2_xcpt_ae_if_0 = io_com_uops_2_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_com_uops_2_xcpt_ma_if_0 = io_com_uops_2_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_com_uops_2_bp_debug_if_0 = io_com_uops_2_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_com_uops_2_bp_xcpt_if_0 = io_com_uops_2_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_debug_fsrc_0 = io_com_uops_2_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_com_uops_2_debug_tsrc_0 = io_com_uops_2_debug_tsrc; // @[rename-stage.scala:160:7]
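  // Rollback and debug inputs: per-lane rollback valids, the global rollback flag, and debug_rob_empty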
wire io_rbk_valids_0_0 = io_rbk_valids_0; // @[rename-stage.scala:160:7]
wire io_rbk_valids_1_0 = io_rbk_valids_1; // @[rename-stage.scala:160:7]
wire io_rbk_valids_2_0 = io_rbk_valids_2; // @[rename-stage.scala:160:7]
wire io_rollback_0 = io_rollback; // @[rename-stage.scala:160:7]
wire io_debug_rob_empty_0 = io_debug_rob_empty; // @[rename-stage.scala:160:7]
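  // The following micro-op fields are constant in this elaboration and are tied off to zero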
wire [3:0] io_dec_uops_0_ctrl_br_type = 4'h0; // @[rename-stage.scala:160:7]
wire [3:0] io_dec_uops_1_ctrl_br_type = 4'h0; // @[rename-stage.scala:160:7]
wire [3:0] io_dec_uops_2_ctrl_br_type = 4'h0; // @[rename-stage.scala:160:7]
wire [3:0] ren1_uops_0_ctrl_br_type = 4'h0; // @[rename-stage.scala:101:29]
wire [3:0] ren1_uops_1_ctrl_br_type = 4'h0; // @[rename-stage.scala:101:29]
wire [3:0] ren1_uops_2_ctrl_br_type = 4'h0; // @[rename-stage.scala:101:29]
wire [1:0] io_dec_uops_0_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_0_iw_state = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_0_rxq_idx = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_0_debug_tsrc = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_iw_state = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_rxq_idx = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_1_debug_tsrc = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_iw_state = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_rxq_idx = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] io_dec_uops_2_debug_tsrc = 2'h0; // @[rename-stage.scala:160:7]
wire [1:0] ren1_uops_0_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_0_iw_state = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_0_rxq_idx = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_0_debug_tsrc = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_1_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_1_iw_state = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_1_rxq_idx = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_1_debug_tsrc = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_2_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_2_iw_state = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_2_rxq_idx = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_2_debug_tsrc = 2'h0; // @[rename-stage.scala:101:29]
wire [2:0] io_dec_uops_0_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_0_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_0_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_1_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_1_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_1_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_2_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_2_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] io_dec_uops_2_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:160:7]
wire [2:0] ren1_uops_0_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_0_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_0_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_1_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_1_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_1_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_2_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_2_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_2_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:101:29]
wire [4:0] io_dec_uops_0_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_0_ldq_idx = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_0_stq_idx = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_0_ppred = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_1_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_1_ldq_idx = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_1_stq_idx = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_1_ppred = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_2_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_2_ldq_idx = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_2_stq_idx = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] io_dec_uops_2_ppred = 5'h0; // @[rename-stage.scala:160:7]
wire [4:0] ren1_uops_0_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_0_ldq_idx = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_0_stq_idx = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_0_ppred = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_1_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_1_ldq_idx = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_1_stq_idx = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_1_ppred = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_2_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_2_ldq_idx = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_2_stq_idx = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_2_ppred = 5'h0; // @[rename-stage.scala:101:29]
wire io_dec_uops_0_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_ctrl_is_load = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_ctrl_is_sta = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_ctrl_is_std = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_prs1_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_prs2_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_ppred_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_0_xcpt_ma_if = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_ctrl_is_load = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_ctrl_is_sta = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_ctrl_is_std = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_prs1_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_prs2_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_prs3_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_ppred_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_1_xcpt_ma_if = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_ctrl_is_load = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_ctrl_is_sta = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_ctrl_is_std = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_prs1_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_prs2_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_prs3_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_ppred_busy = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:160:7]
wire io_dec_uops_2_xcpt_ma_if = 1'h0; // @[rename-stage.scala:160:7]
wire io_wakeups_1_bits_predicated = 1'h0; // @[rename-stage.scala:160:7]
wire ren1_uops_0_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ctrl_is_load = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ctrl_is_sta = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ctrl_is_std = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_prs1_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_prs2_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ppred_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_xcpt_ma_if = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_ctrl_is_load = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_ctrl_is_sta = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_ctrl_is_std = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_prs1_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_prs2_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_prs3_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_ppred_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_1_xcpt_ma_if = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_ctrl_is_load = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_ctrl_is_sta = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_ctrl_is_std = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_prs1_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_prs2_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_prs3_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_ppred_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_2_xcpt_ma_if = 1'h0; // @[rename-stage.scala:101:29]
wire [11:0] io_dec_uops_0_csr_addr = 12'h0; // @[rename-stage.scala:160:7]
wire [11:0] io_dec_uops_1_csr_addr = 12'h0; // @[rename-stage.scala:160:7]
wire [11:0] io_dec_uops_2_csr_addr = 12'h0; // @[rename-stage.scala:160:7]
wire [11:0] ren1_uops_0_csr_addr = 12'h0; // @[rename-stage.scala:101:29]
wire [11:0] ren1_uops_1_csr_addr = 12'h0; // @[rename-stage.scala:101:29]
wire [11:0] ren1_uops_2_csr_addr = 12'h0; // @[rename-stage.scala:101:29]
wire [6:0] io_dec_uops_0_rob_idx = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_0_pdst = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_0_prs1 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_0_prs2 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_0_prs3 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_0_stale_pdst = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_1_rob_idx = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_1_pdst = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_1_prs1 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_1_prs2 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_1_prs3 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_1_stale_pdst = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_2_rob_idx = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_2_pdst = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_2_prs1 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_2_prs2 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_2_prs3 = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] io_dec_uops_2_stale_pdst = 7'h0; // @[rename-stage.scala:160:7]
wire [6:0] ren1_uops_0_rob_idx = 7'h0; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_0_pdst = 7'h0; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_1_rob_idx = 7'h0; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_1_pdst = 7'h0; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_2_rob_idx = 7'h0; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_2_pdst = 7'h0; // @[rename-stage.scala:101:29]
wire [95:0] io_debug_isprlist = 96'h0; // @[rename-stage.scala:160:7]
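// Constant-true terms from the pdst selection logic (rename-stage.scala:306) and forward
// declarations of the per-uop rename-stall signals, which are driven further down.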
wire _ren2_uops_0_pdst_T_1 = 1'h1; // @[rename-stage.scala:306:38]
wire _ren2_uops_1_pdst_T_1 = 1'h1; // @[rename-stage.scala:306:38]
wire _ren2_uops_2_pdst_T_1 = 1'h1; // @[rename-stage.scala:306:38]
wire _io_ren_stalls_0_T_2; // @[rename-stage.scala:339:60]
wire _io_ren_stalls_1_T_2; // @[rename-stage.scala:339:60]
wire _io_ren_stalls_2_T_2; // @[rename-stage.scala:339:60]
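// ren1 stage: fire signals and most micro-op fields are passed straight through
// from the decode-stage inputs (io_dec_fire / io_dec_uops).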
wire ren1_fire_0 = io_dec_fire_0_0; // @[rename-stage.scala:100:29, :160:7]
wire ren1_fire_1 = io_dec_fire_1_0; // @[rename-stage.scala:100:29, :160:7]
wire ren1_fire_2 = io_dec_fire_2_0; // @[rename-stage.scala:100:29, :160:7]
wire [6:0] ren1_uops_0_uopc = io_dec_uops_0_uopc_0; // @[rename-stage.scala:101:29, :160:7]
wire [31:0] ren1_uops_0_inst = io_dec_uops_0_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire [31:0] ren1_uops_0_debug_inst = io_dec_uops_0_debug_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_rvc = io_dec_uops_0_is_rvc_0; // @[rename-stage.scala:101:29, :160:7]
wire [39:0] ren1_uops_0_debug_pc = io_dec_uops_0_debug_pc_0; // @[rename-stage.scala:101:29, :160:7]
wire [2:0] ren1_uops_0_iq_type = io_dec_uops_0_iq_type_0; // @[rename-stage.scala:101:29, :160:7]
wire [9:0] ren1_uops_0_fu_code = io_dec_uops_0_fu_code_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_br = io_dec_uops_0_is_br_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_jalr = io_dec_uops_0_is_jalr_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_jal = io_dec_uops_0_is_jal_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_sfb = io_dec_uops_0_is_sfb_0; // @[rename-stage.scala:101:29, :160:7]
wire [15:0] ren1_uops_0_br_mask = io_dec_uops_0_br_mask_0; // @[rename-stage.scala:101:29, :160:7]
wire [3:0] ren1_uops_0_br_tag = io_dec_uops_0_br_tag_0; // @[rename-stage.scala:101:29, :160:7]
wire [4:0] ren1_uops_0_ftq_idx = io_dec_uops_0_ftq_idx_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_edge_inst = io_dec_uops_0_edge_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_0_pc_lob = io_dec_uops_0_pc_lob_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_taken = io_dec_uops_0_taken_0; // @[rename-stage.scala:101:29, :160:7]
wire [19:0] ren1_uops_0_imm_packed = io_dec_uops_0_imm_packed_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_exception = io_dec_uops_0_exception_0; // @[rename-stage.scala:101:29, :160:7]
wire [63:0] ren1_uops_0_exc_cause = io_dec_uops_0_exc_cause_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_bypassable = io_dec_uops_0_bypassable_0; // @[rename-stage.scala:101:29, :160:7]
wire [4:0] ren1_uops_0_mem_cmd = io_dec_uops_0_mem_cmd_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_0_mem_size = io_dec_uops_0_mem_size_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_mem_signed = io_dec_uops_0_mem_signed_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_fence = io_dec_uops_0_is_fence_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_fencei = io_dec_uops_0_is_fencei_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_amo = io_dec_uops_0_is_amo_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_uses_ldq = io_dec_uops_0_uses_ldq_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_uses_stq = io_dec_uops_0_uses_stq_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_sys_pc2epc = io_dec_uops_0_is_sys_pc2epc_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_is_unique = io_dec_uops_0_is_unique_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_flush_on_commit = io_dec_uops_0_flush_on_commit_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_0_ldst = io_dec_uops_0_ldst_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_0_lrs1 = io_dec_uops_0_lrs1_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_0_lrs2 = io_dec_uops_0_lrs2_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_0_lrs3 = io_dec_uops_0_lrs3_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_ldst_val = io_dec_uops_0_ldst_val_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_0_dst_rtype = io_dec_uops_0_dst_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_0_lrs1_rtype = io_dec_uops_0_lrs1_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_0_lrs2_rtype = io_dec_uops_0_lrs2_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_frs3_en = io_dec_uops_0_frs3_en_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_fp_val = io_dec_uops_0_fp_val_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_fp_single = io_dec_uops_0_fp_single_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_xcpt_pf_if = io_dec_uops_0_xcpt_pf_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_xcpt_ae_if = io_dec_uops_0_xcpt_ae_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_bp_debug_if = io_dec_uops_0_bp_debug_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_0_bp_xcpt_if = io_dec_uops_0_bp_xcpt_if_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_0_debug_fsrc = io_dec_uops_0_debug_fsrc_0; // @[rename-stage.scala:101:29, :160:7]
wire [6:0] ren1_uops_1_uopc = io_dec_uops_1_uopc_0; // @[rename-stage.scala:101:29, :160:7]
wire [31:0] ren1_uops_1_inst = io_dec_uops_1_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire [31:0] ren1_uops_1_debug_inst = io_dec_uops_1_debug_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_rvc = io_dec_uops_1_is_rvc_0; // @[rename-stage.scala:101:29, :160:7]
wire [39:0] ren1_uops_1_debug_pc = io_dec_uops_1_debug_pc_0; // @[rename-stage.scala:101:29, :160:7]
wire [2:0] ren1_uops_1_iq_type = io_dec_uops_1_iq_type_0; // @[rename-stage.scala:101:29, :160:7]
wire [9:0] ren1_uops_1_fu_code = io_dec_uops_1_fu_code_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_br = io_dec_uops_1_is_br_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_jalr = io_dec_uops_1_is_jalr_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_jal = io_dec_uops_1_is_jal_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_sfb = io_dec_uops_1_is_sfb_0; // @[rename-stage.scala:101:29, :160:7]
wire [15:0] ren1_uops_1_br_mask = io_dec_uops_1_br_mask_0; // @[rename-stage.scala:101:29, :160:7]
wire [3:0] ren1_uops_1_br_tag = io_dec_uops_1_br_tag_0; // @[rename-stage.scala:101:29, :160:7]
wire [4:0] ren1_uops_1_ftq_idx = io_dec_uops_1_ftq_idx_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_edge_inst = io_dec_uops_1_edge_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_1_pc_lob = io_dec_uops_1_pc_lob_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_taken = io_dec_uops_1_taken_0; // @[rename-stage.scala:101:29, :160:7]
wire [19:0] ren1_uops_1_imm_packed = io_dec_uops_1_imm_packed_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_exception = io_dec_uops_1_exception_0; // @[rename-stage.scala:101:29, :160:7]
wire [63:0] ren1_uops_1_exc_cause = io_dec_uops_1_exc_cause_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_bypassable = io_dec_uops_1_bypassable_0; // @[rename-stage.scala:101:29, :160:7]
wire [4:0] ren1_uops_1_mem_cmd = io_dec_uops_1_mem_cmd_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_1_mem_size = io_dec_uops_1_mem_size_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_mem_signed = io_dec_uops_1_mem_signed_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_fence = io_dec_uops_1_is_fence_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_fencei = io_dec_uops_1_is_fencei_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_amo = io_dec_uops_1_is_amo_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_uses_ldq = io_dec_uops_1_uses_ldq_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_uses_stq = io_dec_uops_1_uses_stq_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_sys_pc2epc = io_dec_uops_1_is_sys_pc2epc_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_is_unique = io_dec_uops_1_is_unique_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_flush_on_commit = io_dec_uops_1_flush_on_commit_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_1_ldst = io_dec_uops_1_ldst_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_1_lrs1 = io_dec_uops_1_lrs1_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_1_lrs2 = io_dec_uops_1_lrs2_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_1_lrs3 = io_dec_uops_1_lrs3_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_ldst_val = io_dec_uops_1_ldst_val_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_1_dst_rtype = io_dec_uops_1_dst_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_1_lrs1_rtype = io_dec_uops_1_lrs1_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_1_lrs2_rtype = io_dec_uops_1_lrs2_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_frs3_en = io_dec_uops_1_frs3_en_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_fp_val = io_dec_uops_1_fp_val_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_fp_single = io_dec_uops_1_fp_single_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_xcpt_pf_if = io_dec_uops_1_xcpt_pf_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_xcpt_ae_if = io_dec_uops_1_xcpt_ae_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_bp_debug_if = io_dec_uops_1_bp_debug_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_1_bp_xcpt_if = io_dec_uops_1_bp_xcpt_if_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_1_debug_fsrc = io_dec_uops_1_debug_fsrc_0; // @[rename-stage.scala:101:29, :160:7]
wire [6:0] ren1_uops_2_uopc = io_dec_uops_2_uopc_0; // @[rename-stage.scala:101:29, :160:7]
wire [31:0] ren1_uops_2_inst = io_dec_uops_2_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire [31:0] ren1_uops_2_debug_inst = io_dec_uops_2_debug_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_rvc = io_dec_uops_2_is_rvc_0; // @[rename-stage.scala:101:29, :160:7]
wire [39:0] ren1_uops_2_debug_pc = io_dec_uops_2_debug_pc_0; // @[rename-stage.scala:101:29, :160:7]
wire [2:0] ren1_uops_2_iq_type = io_dec_uops_2_iq_type_0; // @[rename-stage.scala:101:29, :160:7]
wire [9:0] ren1_uops_2_fu_code = io_dec_uops_2_fu_code_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_br = io_dec_uops_2_is_br_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_jalr = io_dec_uops_2_is_jalr_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_jal = io_dec_uops_2_is_jal_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_sfb = io_dec_uops_2_is_sfb_0; // @[rename-stage.scala:101:29, :160:7]
wire [15:0] ren1_uops_2_br_mask = io_dec_uops_2_br_mask_0; // @[rename-stage.scala:101:29, :160:7]
wire [3:0] ren1_uops_2_br_tag = io_dec_uops_2_br_tag_0; // @[rename-stage.scala:101:29, :160:7]
wire [4:0] ren1_uops_2_ftq_idx = io_dec_uops_2_ftq_idx_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_edge_inst = io_dec_uops_2_edge_inst_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_2_pc_lob = io_dec_uops_2_pc_lob_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_taken = io_dec_uops_2_taken_0; // @[rename-stage.scala:101:29, :160:7]
wire [19:0] ren1_uops_2_imm_packed = io_dec_uops_2_imm_packed_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_exception = io_dec_uops_2_exception_0; // @[rename-stage.scala:101:29, :160:7]
wire [63:0] ren1_uops_2_exc_cause = io_dec_uops_2_exc_cause_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_bypassable = io_dec_uops_2_bypassable_0; // @[rename-stage.scala:101:29, :160:7]
wire [4:0] ren1_uops_2_mem_cmd = io_dec_uops_2_mem_cmd_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_2_mem_size = io_dec_uops_2_mem_size_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_mem_signed = io_dec_uops_2_mem_signed_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_fence = io_dec_uops_2_is_fence_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_fencei = io_dec_uops_2_is_fencei_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_amo = io_dec_uops_2_is_amo_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_uses_ldq = io_dec_uops_2_uses_ldq_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_uses_stq = io_dec_uops_2_uses_stq_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_sys_pc2epc = io_dec_uops_2_is_sys_pc2epc_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_is_unique = io_dec_uops_2_is_unique_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_flush_on_commit = io_dec_uops_2_flush_on_commit_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_2_ldst = io_dec_uops_2_ldst_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_2_lrs1 = io_dec_uops_2_lrs1_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_2_lrs2 = io_dec_uops_2_lrs2_0; // @[rename-stage.scala:101:29, :160:7]
wire [5:0] ren1_uops_2_lrs3 = io_dec_uops_2_lrs3_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_ldst_val = io_dec_uops_2_ldst_val_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_2_dst_rtype = io_dec_uops_2_dst_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_2_lrs1_rtype = io_dec_uops_2_lrs1_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_2_lrs2_rtype = io_dec_uops_2_lrs2_rtype_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_frs3_en = io_dec_uops_2_frs3_en_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_fp_val = io_dec_uops_2_fp_val_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_fp_single = io_dec_uops_2_fp_single_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_xcpt_pf_if = io_dec_uops_2_xcpt_pf_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_xcpt_ae_if = io_dec_uops_2_xcpt_ae_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_bp_debug_if = io_dec_uops_2_bp_debug_if_0; // @[rename-stage.scala:101:29, :160:7]
wire ren1_uops_2_bp_xcpt_if = io_dec_uops_2_bp_xcpt_if_0; // @[rename-stage.scala:101:29, :160:7]
wire [1:0] ren1_uops_2_debug_fsrc = io_dec_uops_2_debug_fsrc_0; // @[rename-stage.scala:101:29, :160:7]
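// ren2 valid bits and the io_ren2_uops_*_newuop_* wires follow; the newuop wires come
// from a helper in util.scala (likely the branch-mask update that copies a uop into a
// fresh Wire before adjusting its br_mask).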
wire ren2_valids_0; // @[rename-stage.scala:107:29]
wire ren2_valids_1; // @[rename-stage.scala:107:29]
wire ren2_valids_2; // @[rename-stage.scala:107:29]
wire [6:0] io_ren2_uops_0_newuop_uopc; // @[util.scala:73:26]
wire [31:0] io_ren2_uops_0_newuop_inst; // @[util.scala:73:26]
wire [31:0] io_ren2_uops_0_newuop_debug_inst; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_rvc; // @[util.scala:73:26]
wire [39:0] io_ren2_uops_0_newuop_debug_pc; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_0_newuop_iq_type; // @[util.scala:73:26]
wire [9:0] io_ren2_uops_0_newuop_fu_code; // @[util.scala:73:26]
wire [3:0] io_ren2_uops_0_newuop_ctrl_br_type; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_ctrl_op1_sel; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_0_newuop_ctrl_op2_sel; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_0_newuop_ctrl_imm_sel; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_0_newuop_ctrl_op_fcn; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_ctrl_fcn_dw; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_0_newuop_ctrl_csr_cmd; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_ctrl_is_load; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_ctrl_is_sta; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_ctrl_is_std; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_iw_state; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_iw_p1_poisoned; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_iw_p2_poisoned; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_br; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_jalr; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_jal; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_sfb; // @[util.scala:73:26]
wire [15:0] io_ren2_uops_0_newuop_br_mask; // @[util.scala:73:26]
wire [3:0] io_ren2_uops_0_newuop_br_tag; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_0_newuop_ftq_idx; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_edge_inst; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_0_newuop_pc_lob; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_taken; // @[util.scala:73:26]
wire [19:0] io_ren2_uops_0_newuop_imm_packed; // @[util.scala:73:26]
wire [11:0] io_ren2_uops_0_newuop_csr_addr; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_0_newuop_rob_idx; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_0_newuop_ldq_idx; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_0_newuop_stq_idx; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_rxq_idx; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_0_newuop_pdst; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_0_newuop_prs1; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_0_newuop_prs2; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_0_newuop_prs3; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_0_newuop_ppred; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_prs1_busy; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_prs2_busy; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_prs3_busy; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_ppred_busy; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_0_newuop_stale_pdst; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_exception; // @[util.scala:73:26]
wire [63:0] io_ren2_uops_0_newuop_exc_cause; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_bypassable; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_0_newuop_mem_cmd; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_mem_size; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_mem_signed; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_fence; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_fencei; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_amo; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_uses_ldq; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_uses_stq; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_sys_pc2epc; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_is_unique; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_flush_on_commit; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_ldst_is_rs1; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_0_newuop_ldst; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_0_newuop_lrs1; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_0_newuop_lrs2; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_0_newuop_lrs3; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_ldst_val; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_dst_rtype; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_lrs1_rtype; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_lrs2_rtype; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_frs3_en; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_fp_val; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_fp_single; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_xcpt_pf_if; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_xcpt_ae_if; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_xcpt_ma_if; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_bp_debug_if; // @[util.scala:73:26]
wire io_ren2_uops_0_newuop_bp_xcpt_if; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_debug_fsrc; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_0_newuop_debug_tsrc; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_1_newuop_uopc; // @[util.scala:73:26]
wire [31:0] io_ren2_uops_1_newuop_inst; // @[util.scala:73:26]
wire [31:0] io_ren2_uops_1_newuop_debug_inst; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_rvc; // @[util.scala:73:26]
wire [39:0] io_ren2_uops_1_newuop_debug_pc; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_1_newuop_iq_type; // @[util.scala:73:26]
wire [9:0] io_ren2_uops_1_newuop_fu_code; // @[util.scala:73:26]
wire [3:0] io_ren2_uops_1_newuop_ctrl_br_type; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_ctrl_op1_sel; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_1_newuop_ctrl_op2_sel; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_1_newuop_ctrl_imm_sel; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_1_newuop_ctrl_op_fcn; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_ctrl_fcn_dw; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_1_newuop_ctrl_csr_cmd; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_ctrl_is_load; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_ctrl_is_sta; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_ctrl_is_std; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_iw_state; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_iw_p1_poisoned; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_iw_p2_poisoned; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_br; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_jalr; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_jal; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_sfb; // @[util.scala:73:26]
wire [15:0] io_ren2_uops_1_newuop_br_mask; // @[util.scala:73:26]
wire [3:0] io_ren2_uops_1_newuop_br_tag; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_1_newuop_ftq_idx; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_edge_inst; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_1_newuop_pc_lob; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_taken; // @[util.scala:73:26]
wire [19:0] io_ren2_uops_1_newuop_imm_packed; // @[util.scala:73:26]
wire [11:0] io_ren2_uops_1_newuop_csr_addr; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_1_newuop_rob_idx; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_1_newuop_ldq_idx; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_1_newuop_stq_idx; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_rxq_idx; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_1_newuop_pdst; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_1_newuop_prs1; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_1_newuop_prs2; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_1_newuop_prs3; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_1_newuop_ppred; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_prs1_busy; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_prs2_busy; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_prs3_busy; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_ppred_busy; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_1_newuop_stale_pdst; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_exception; // @[util.scala:73:26]
wire [63:0] io_ren2_uops_1_newuop_exc_cause; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_bypassable; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_1_newuop_mem_cmd; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_mem_size; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_mem_signed; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_fence; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_fencei; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_amo; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_uses_ldq; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_uses_stq; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_sys_pc2epc; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_is_unique; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_flush_on_commit; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_ldst_is_rs1; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_1_newuop_ldst; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_1_newuop_lrs1; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_1_newuop_lrs2; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_1_newuop_lrs3; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_ldst_val; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_dst_rtype; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_lrs1_rtype; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_lrs2_rtype; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_frs3_en; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_fp_val; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_fp_single; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_xcpt_pf_if; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_xcpt_ae_if; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_xcpt_ma_if; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_bp_debug_if; // @[util.scala:73:26]
wire io_ren2_uops_1_newuop_bp_xcpt_if; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_debug_fsrc; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_1_newuop_debug_tsrc; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_2_newuop_uopc; // @[util.scala:73:26]
wire [31:0] io_ren2_uops_2_newuop_inst; // @[util.scala:73:26]
wire [31:0] io_ren2_uops_2_newuop_debug_inst; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_rvc; // @[util.scala:73:26]
wire [39:0] io_ren2_uops_2_newuop_debug_pc; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_2_newuop_iq_type; // @[util.scala:73:26]
wire [9:0] io_ren2_uops_2_newuop_fu_code; // @[util.scala:73:26]
wire [3:0] io_ren2_uops_2_newuop_ctrl_br_type; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_ctrl_op1_sel; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_2_newuop_ctrl_op2_sel; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_2_newuop_ctrl_imm_sel; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_2_newuop_ctrl_op_fcn; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_ctrl_fcn_dw; // @[util.scala:73:26]
wire [2:0] io_ren2_uops_2_newuop_ctrl_csr_cmd; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_ctrl_is_load; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_ctrl_is_sta; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_ctrl_is_std; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_iw_state; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_iw_p1_poisoned; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_iw_p2_poisoned; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_br; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_jalr; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_jal; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_sfb; // @[util.scala:73:26]
wire [15:0] io_ren2_uops_2_newuop_br_mask; // @[util.scala:73:26]
wire [3:0] io_ren2_uops_2_newuop_br_tag; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_2_newuop_ftq_idx; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_edge_inst; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_2_newuop_pc_lob; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_taken; // @[util.scala:73:26]
wire [19:0] io_ren2_uops_2_newuop_imm_packed; // @[util.scala:73:26]
wire [11:0] io_ren2_uops_2_newuop_csr_addr; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_2_newuop_rob_idx; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_2_newuop_ldq_idx; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_2_newuop_stq_idx; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_rxq_idx; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_2_newuop_pdst; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_2_newuop_prs1; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_2_newuop_prs2; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_2_newuop_prs3; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_2_newuop_ppred; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_prs1_busy; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_prs2_busy; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_prs3_busy; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_ppred_busy; // @[util.scala:73:26]
wire [6:0] io_ren2_uops_2_newuop_stale_pdst; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_exception; // @[util.scala:73:26]
wire [63:0] io_ren2_uops_2_newuop_exc_cause; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_bypassable; // @[util.scala:73:26]
wire [4:0] io_ren2_uops_2_newuop_mem_cmd; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_mem_size; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_mem_signed; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_fence; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_fencei; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_amo; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_uses_ldq; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_uses_stq; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_sys_pc2epc; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_is_unique; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_flush_on_commit; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_ldst_is_rs1; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_2_newuop_ldst; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_2_newuop_lrs1; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_2_newuop_lrs2; // @[util.scala:73:26]
wire [5:0] io_ren2_uops_2_newuop_lrs3; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_ldst_val; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_dst_rtype; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_lrs1_rtype; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_lrs2_rtype; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_frs3_en; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_fp_val; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_fp_single; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_xcpt_pf_if; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_xcpt_ae_if; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_xcpt_ma_if; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_bp_debug_if; // @[util.scala:73:26]
wire io_ren2_uops_2_newuop_bp_xcpt_if; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_debug_fsrc; // @[util.scala:73:26]
wire [1:0] io_ren2_uops_2_newuop_debug_tsrc; // @[util.scala:73:26]
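// Forward declarations for the module outputs: per-uop rename stalls, the ren2 valid
// mask, the ren2 micro-ops, and the free-list / busy-table debug views.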
wire io_ren_stalls_0_0; // @[rename-stage.scala:160:7]
wire io_ren_stalls_1_0; // @[rename-stage.scala:160:7]
wire io_ren_stalls_2_0; // @[rename-stage.scala:160:7]
wire io_ren2_mask_0; // @[rename-stage.scala:160:7]
wire io_ren2_mask_1; // @[rename-stage.scala:160:7]
wire io_ren2_mask_2; // @[rename-stage.scala:160:7]
wire [3:0] io_ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_0_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_ren2_uops_0_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_ren2_uops_0_debug_inst; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_ren2_uops_0_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_0_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_ren2_uops_0_fu_code; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_iw_state; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_br; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_jalr; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_jal; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_ren2_uops_0_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_ren2_uops_0_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_0_ftq_idx; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_0_pc_lob; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_ren2_uops_0_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_ren2_uops_0_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_0_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_0_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_0_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_0_pdst_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_0_prs1_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_0_prs2_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_0_prs3_0; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_0_ppred; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_prs1_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_prs2_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_prs3_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_0_stale_pdst_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_ren2_uops_0_exc_cause; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_0_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_mem_size; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_mem_signed; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_fence; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_fencei; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_amo; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_uses_ldq; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_uses_stq; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_is_unique; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_0_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_0_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_0_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_0_lrs3; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_frs3_en; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_fp_val; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_fp_single; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_0_debug_tsrc; // @[rename-stage.scala:160:7]
wire [3:0] io_ren2_uops_1_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_1_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_1_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_1_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_1_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_1_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_ren2_uops_1_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_ren2_uops_1_debug_inst; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_ren2_uops_1_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_1_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_ren2_uops_1_fu_code; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_iw_state; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_br; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_jalr; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_jal; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_ren2_uops_1_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_ren2_uops_1_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_1_ftq_idx; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_1_pc_lob; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_ren2_uops_1_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_ren2_uops_1_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_1_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_1_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_1_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_1_pdst_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_1_prs1_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_1_prs2_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_1_prs3_0; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_1_ppred; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_prs1_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_prs2_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_prs3_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_1_stale_pdst_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_ren2_uops_1_exc_cause; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_1_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_mem_size; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_mem_signed; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_fence; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_fencei; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_amo; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_uses_ldq; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_uses_stq; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_is_unique; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_1_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_1_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_1_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_1_lrs3; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_frs3_en; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_fp_val; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_fp_single; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_1_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_1_debug_tsrc; // @[rename-stage.scala:160:7]
wire [3:0] io_ren2_uops_2_ctrl_br_type; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_ctrl_op1_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_2_ctrl_op2_sel; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_2_ctrl_imm_sel; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_2_ctrl_op_fcn; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_ctrl_fcn_dw; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_2_ctrl_csr_cmd; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_ctrl_is_load; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_ctrl_is_sta; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_ctrl_is_std; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_2_uopc; // @[rename-stage.scala:160:7]
wire [31:0] io_ren2_uops_2_inst; // @[rename-stage.scala:160:7]
wire [31:0] io_ren2_uops_2_debug_inst; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_rvc; // @[rename-stage.scala:160:7]
wire [39:0] io_ren2_uops_2_debug_pc; // @[rename-stage.scala:160:7]
wire [2:0] io_ren2_uops_2_iq_type; // @[rename-stage.scala:160:7]
wire [9:0] io_ren2_uops_2_fu_code; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_iw_state; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_iw_p1_poisoned; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_iw_p2_poisoned; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_br; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_jalr; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_jal; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_sfb; // @[rename-stage.scala:160:7]
wire [15:0] io_ren2_uops_2_br_mask; // @[rename-stage.scala:160:7]
wire [3:0] io_ren2_uops_2_br_tag; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_2_ftq_idx; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_edge_inst; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_2_pc_lob; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_taken; // @[rename-stage.scala:160:7]
wire [19:0] io_ren2_uops_2_imm_packed; // @[rename-stage.scala:160:7]
wire [11:0] io_ren2_uops_2_csr_addr; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_2_rob_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_2_ldq_idx; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_2_stq_idx; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_rxq_idx; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_2_pdst_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_2_prs1_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_2_prs2_0; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_2_prs3_0; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_2_ppred; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_prs1_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_prs2_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_prs3_busy_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_ppred_busy; // @[rename-stage.scala:160:7]
wire [6:0] io_ren2_uops_2_stale_pdst_0; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_exception; // @[rename-stage.scala:160:7]
wire [63:0] io_ren2_uops_2_exc_cause; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_bypassable; // @[rename-stage.scala:160:7]
wire [4:0] io_ren2_uops_2_mem_cmd; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_mem_size; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_mem_signed; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_fence; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_fencei; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_amo; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_uses_ldq; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_uses_stq; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_sys_pc2epc; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_is_unique; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_flush_on_commit; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_ldst_is_rs1; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_2_ldst; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_2_lrs1; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_2_lrs2; // @[rename-stage.scala:160:7]
wire [5:0] io_ren2_uops_2_lrs3; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_ldst_val; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_dst_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_lrs1_rtype; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_lrs2_rtype; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_frs3_en; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_fp_val; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_fp_single; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_xcpt_pf_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_xcpt_ae_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_xcpt_ma_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_bp_debug_if; // @[rename-stage.scala:160:7]
wire io_ren2_uops_2_bp_xcpt_if; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_debug_fsrc; // @[rename-stage.scala:160:7]
wire [1:0] io_ren2_uops_2_debug_tsrc; // @[rename-stage.scala:160:7]
wire [95:0] io_debug_freelist; // @[rename-stage.scala:160:7]
wire [95:0] io_debug_busytable; // @[rename-stage.scala:160:7]
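// Map-table requests: each ren1 uop forwards its logical destination and source
// specifiers (ldst, lrs1-lrs3) to the rename map table.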
wire [5:0] map_reqs_0_ldst = ren1_uops_0_ldst; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_0_lrs1 = ren1_uops_0_lrs1; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_0_lrs2 = ren1_uops_0_lrs2; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_0_lrs3 = ren1_uops_0_lrs3; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_1_ldst = ren1_uops_1_ldst; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_1_lrs1 = ren1_uops_1_lrs1; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_1_lrs2 = ren1_uops_1_lrs2; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_1_lrs3 = ren1_uops_1_lrs3; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_2_ldst = ren1_uops_2_ldst; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_2_lrs1 = ren1_uops_2_lrs1; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_2_lrs2 = ren1_uops_2_lrs2; // @[rename-stage.scala:101:29, :252:24]
wire [5:0] map_reqs_2_lrs3 = ren1_uops_2_lrs3; // @[rename-stage.scala:101:29, :252:24]
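// Physical register specifiers for the ren1 uops, presumably driven from the
// map-table responses later in the module.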
wire [6:0] ren1_uops_0_prs1; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_0_prs2; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_0_prs3; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_0_stale_pdst; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_1_prs1; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_1_prs2; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_1_prs3; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_1_stale_pdst; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_2_prs1; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_2_prs2; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_2_prs3; // @[rename-stage.scala:101:29]
wire [6:0] ren1_uops_2_stale_pdst; // @[rename-stage.scala:101:29]
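// The ren2 valid bits drive io_ren2_mask directly. The bypassed_uop_* wires that follow
// appear to implement the intra-group bypass of newly allocated physical destinations
// (rename-stage.scala:341), starting from a copy of each ren2 uop's fields.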
assign io_ren2_mask_0 = ren2_valids_0; // @[rename-stage.scala:107:29, :160:7]
assign io_ren2_mask_1 = ren2_valids_1; // @[rename-stage.scala:107:29, :160:7]
assign io_ren2_mask_2 = ren2_valids_2; // @[rename-stage.scala:107:29, :160:7]
wire [6:0] bypassed_uop_uopc = ren2_uops_0_uopc; // @[rename-stage.scala:108:29, :341:28]
wire [31:0] bypassed_uop_inst = ren2_uops_0_inst; // @[rename-stage.scala:108:29, :341:28]
wire [31:0] bypassed_uop_debug_inst = ren2_uops_0_debug_inst; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_rvc = ren2_uops_0_is_rvc; // @[rename-stage.scala:108:29, :341:28]
wire [39:0] bypassed_uop_debug_pc = ren2_uops_0_debug_pc; // @[rename-stage.scala:108:29, :341:28]
wire [2:0] bypassed_uop_iq_type = ren2_uops_0_iq_type; // @[rename-stage.scala:108:29, :341:28]
wire [9:0] bypassed_uop_fu_code = ren2_uops_0_fu_code; // @[rename-stage.scala:108:29, :341:28]
wire [3:0] bypassed_uop_ctrl_br_type = ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:108:29, :341:28]
wire [1:0] bypassed_uop_ctrl_op1_sel = ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:108:29, :341:28]
wire [2:0] bypassed_uop_ctrl_op2_sel = ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:108:29, :341:28]
wire [2:0] bypassed_uop_ctrl_imm_sel = ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:108:29, :341:28]
wire [4:0] bypassed_uop_ctrl_op_fcn = ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_ctrl_fcn_dw = ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :341:28]
wire [2:0] bypassed_uop_ctrl_csr_cmd = ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_ctrl_is_load = ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_ctrl_is_sta = ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_ctrl_is_std = ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:108:29, :341:28]
wire [1:0] bypassed_uop_iw_state = ren2_uops_0_iw_state; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_iw_p1_poisoned = ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_iw_p2_poisoned = ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_br = ren2_uops_0_is_br; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_jalr = ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_jal = ren2_uops_0_is_jal; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_sfb = ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29, :341:28]
wire [15:0] bypassed_uop_br_mask = ren2_uops_0_br_mask; // @[rename-stage.scala:108:29, :341:28]
wire [3:0] ren2_br_tags_0_bits = ren2_uops_0_br_tag; // @[rename-stage.scala:108:29, :233:29]
wire [3:0] bypassed_uop_br_tag = ren2_uops_0_br_tag; // @[rename-stage.scala:108:29, :341:28]
wire [4:0] bypassed_uop_ftq_idx = ren2_uops_0_ftq_idx; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_edge_inst = ren2_uops_0_edge_inst; // @[rename-stage.scala:108:29, :341:28]
wire [5:0] bypassed_uop_pc_lob = ren2_uops_0_pc_lob; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_taken = ren2_uops_0_taken; // @[rename-stage.scala:108:29, :341:28]
wire [19:0] bypassed_uop_imm_packed = ren2_uops_0_imm_packed; // @[rename-stage.scala:108:29, :341:28]
wire [11:0] bypassed_uop_csr_addr = ren2_uops_0_csr_addr; // @[rename-stage.scala:108:29, :341:28]
wire [6:0] bypassed_uop_rob_idx = ren2_uops_0_rob_idx; // @[rename-stage.scala:108:29, :341:28]
wire [4:0] bypassed_uop_ldq_idx = ren2_uops_0_ldq_idx; // @[rename-stage.scala:108:29, :341:28]
wire [4:0] bypassed_uop_stq_idx = ren2_uops_0_stq_idx; // @[rename-stage.scala:108:29, :341:28]
wire [6:0] _ren2_uops_0_pdst_T_2; // @[rename-stage.scala:306:20]
wire [1:0] bypassed_uop_rxq_idx = ren2_uops_0_rxq_idx; // @[rename-stage.scala:108:29, :341:28]
wire [6:0] bypassed_uop_pdst = ren2_uops_0_pdst; // @[rename-stage.scala:108:29, :341:28]
wire [6:0] bypassed_uop_prs1 = ren2_uops_0_prs1; // @[rename-stage.scala:108:29, :341:28]
wire [6:0] bypassed_uop_prs2 = ren2_uops_0_prs2; // @[rename-stage.scala:108:29, :341:28]
wire [6:0] bypassed_uop_prs3 = ren2_uops_0_prs3; // @[rename-stage.scala:108:29, :341:28]
wire _ren2_uops_0_prs1_busy_T_1; // @[rename-stage.scala:323:47]
wire [4:0] bypassed_uop_ppred = ren2_uops_0_ppred; // @[rename-stage.scala:108:29, :341:28]
wire _ren2_uops_0_prs2_busy_T_1; // @[rename-stage.scala:324:47]
wire bypassed_uop_prs1_busy = ren2_uops_0_prs1_busy; // @[rename-stage.scala:108:29, :341:28]
wire _ren2_uops_0_prs3_busy_T; // @[rename-stage.scala:325:34]
wire bypassed_uop_prs2_busy = ren2_uops_0_prs2_busy; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_prs3_busy = ren2_uops_0_prs3_busy; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_ppred_busy = ren2_uops_0_ppred_busy; // @[rename-stage.scala:108:29, :341:28]
wire [6:0] bypassed_uop_stale_pdst = ren2_uops_0_stale_pdst; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_exception = ren2_uops_0_exception; // @[rename-stage.scala:108:29, :341:28]
wire [63:0] bypassed_uop_exc_cause = ren2_uops_0_exc_cause; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_bypassable = ren2_uops_0_bypassable; // @[rename-stage.scala:108:29, :341:28]
wire [4:0] bypassed_uop_mem_cmd = ren2_uops_0_mem_cmd; // @[rename-stage.scala:108:29, :341:28]
wire [1:0] bypassed_uop_mem_size = ren2_uops_0_mem_size; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_mem_signed = ren2_uops_0_mem_signed; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_fence = ren2_uops_0_is_fence; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_fencei = ren2_uops_0_is_fencei; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_amo = ren2_uops_0_is_amo; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_uses_ldq = ren2_uops_0_uses_ldq; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_uses_stq = ren2_uops_0_uses_stq; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_sys_pc2epc = ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_is_unique = ren2_uops_0_is_unique; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_flush_on_commit = ren2_uops_0_flush_on_commit; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_ldst_is_rs1 = ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:108:29, :341:28]
wire [5:0] bypassed_uop_ldst = ren2_uops_0_ldst; // @[rename-stage.scala:108:29, :341:28]
wire [5:0] bypassed_uop_lrs1 = ren2_uops_0_lrs1; // @[rename-stage.scala:108:29, :341:28]
wire [5:0] bypassed_uop_lrs2 = ren2_uops_0_lrs2; // @[rename-stage.scala:108:29, :341:28]
wire [5:0] bypassed_uop_lrs3 = ren2_uops_0_lrs3; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_ldst_val = ren2_uops_0_ldst_val; // @[rename-stage.scala:108:29, :341:28]
wire [1:0] bypassed_uop_dst_rtype = ren2_uops_0_dst_rtype; // @[rename-stage.scala:108:29, :341:28]
wire [1:0] bypassed_uop_lrs1_rtype = ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:108:29, :341:28]
wire [1:0] bypassed_uop_lrs2_rtype = ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_frs3_en = ren2_uops_0_frs3_en; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_fp_val = ren2_uops_0_fp_val; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_fp_single = ren2_uops_0_fp_single; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_xcpt_pf_if = ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_xcpt_ae_if = ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_xcpt_ma_if = ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_bp_debug_if = ren2_uops_0_bp_debug_if; // @[rename-stage.scala:108:29, :341:28]
wire bypassed_uop_bp_xcpt_if = ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:108:29, :341:28]
wire [1:0] bypassed_uop_debug_fsrc = ren2_uops_0_debug_fsrc; // @[rename-stage.scala:108:29, :341:28]
wire [1:0] bypassed_uop_debug_tsrc = ren2_uops_0_debug_tsrc; // @[rename-stage.scala:108:29, :341:28]
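// Editor's note (inferred from the rename-stage.scala:174 annotations): the
// bypassed_uop_bypassed_uop_* wires that follow appear to be the flattened
// pass-through fields of the intra-group bypass helper applied to rename
// lanes 1 and 2, fed from ren2_uops_1_* and ren2_uops_2_* respectively.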
wire [6:0] bypassed_uop_bypassed_uop_uopc = ren2_uops_1_uopc; // @[rename-stage.scala:108:29, :174:28]
wire [31:0] bypassed_uop_bypassed_uop_inst = ren2_uops_1_inst; // @[rename-stage.scala:108:29, :174:28]
wire [31:0] bypassed_uop_bypassed_uop_debug_inst = ren2_uops_1_debug_inst; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_rvc = ren2_uops_1_is_rvc; // @[rename-stage.scala:108:29, :174:28]
wire [39:0] bypassed_uop_bypassed_uop_debug_pc = ren2_uops_1_debug_pc; // @[rename-stage.scala:108:29, :174:28]
wire [2:0] bypassed_uop_bypassed_uop_iq_type = ren2_uops_1_iq_type; // @[rename-stage.scala:108:29, :174:28]
wire [9:0] bypassed_uop_bypassed_uop_fu_code = ren2_uops_1_fu_code; // @[rename-stage.scala:108:29, :174:28]
wire [3:0] bypassed_uop_bypassed_uop_ctrl_br_type = ren2_uops_1_ctrl_br_type; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_ctrl_op1_sel = ren2_uops_1_ctrl_op1_sel; // @[rename-stage.scala:108:29, :174:28]
wire [2:0] bypassed_uop_bypassed_uop_ctrl_op2_sel = ren2_uops_1_ctrl_op2_sel; // @[rename-stage.scala:108:29, :174:28]
wire [2:0] bypassed_uop_bypassed_uop_ctrl_imm_sel = ren2_uops_1_ctrl_imm_sel; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_ctrl_op_fcn = ren2_uops_1_ctrl_op_fcn; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_ctrl_fcn_dw = ren2_uops_1_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :174:28]
wire [2:0] bypassed_uop_bypassed_uop_ctrl_csr_cmd = ren2_uops_1_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_ctrl_is_load = ren2_uops_1_ctrl_is_load; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_ctrl_is_sta = ren2_uops_1_ctrl_is_sta; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_ctrl_is_std = ren2_uops_1_ctrl_is_std; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_iw_state = ren2_uops_1_iw_state; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_iw_p1_poisoned = ren2_uops_1_iw_p1_poisoned; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_iw_p2_poisoned = ren2_uops_1_iw_p2_poisoned; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_br = ren2_uops_1_is_br; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_jalr = ren2_uops_1_is_jalr; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_jal = ren2_uops_1_is_jal; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_sfb = ren2_uops_1_is_sfb; // @[rename-stage.scala:108:29, :174:28]
wire [15:0] bypassed_uop_bypassed_uop_br_mask = ren2_uops_1_br_mask; // @[rename-stage.scala:108:29, :174:28]
wire [3:0] ren2_br_tags_1_bits = ren2_uops_1_br_tag; // @[rename-stage.scala:108:29, :233:29]
wire [3:0] bypassed_uop_bypassed_uop_br_tag = ren2_uops_1_br_tag; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_ftq_idx = ren2_uops_1_ftq_idx; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_edge_inst = ren2_uops_1_edge_inst; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_pc_lob = ren2_uops_1_pc_lob; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_taken = ren2_uops_1_taken; // @[rename-stage.scala:108:29, :174:28]
wire [19:0] bypassed_uop_bypassed_uop_imm_packed = ren2_uops_1_imm_packed; // @[rename-stage.scala:108:29, :174:28]
wire [11:0] bypassed_uop_bypassed_uop_csr_addr = ren2_uops_1_csr_addr; // @[rename-stage.scala:108:29, :174:28]
wire [6:0] bypassed_uop_bypassed_uop_rob_idx = ren2_uops_1_rob_idx; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_ldq_idx = ren2_uops_1_ldq_idx; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_stq_idx = ren2_uops_1_stq_idx; // @[rename-stage.scala:108:29, :174:28]
wire [6:0] _ren2_uops_1_pdst_T_2; // @[rename-stage.scala:306:20]
wire [1:0] bypassed_uop_bypassed_uop_rxq_idx = ren2_uops_1_rxq_idx; // @[rename-stage.scala:108:29, :174:28]
wire [6:0] bypassed_uop_bypassed_uop_pdst = ren2_uops_1_pdst; // @[rename-stage.scala:108:29, :174:28]
wire _ren2_uops_1_prs1_busy_T_1; // @[rename-stage.scala:323:47]
wire [4:0] bypassed_uop_bypassed_uop_ppred = ren2_uops_1_ppred; // @[rename-stage.scala:108:29, :174:28]
wire _ren2_uops_1_prs2_busy_T_1; // @[rename-stage.scala:324:47]
wire _ren2_uops_1_prs3_busy_T; // @[rename-stage.scala:325:34]
wire bypassed_uop_bypassed_uop_ppred_busy = ren2_uops_1_ppred_busy; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_exception = ren2_uops_1_exception; // @[rename-stage.scala:108:29, :174:28]
wire [63:0] bypassed_uop_bypassed_uop_exc_cause = ren2_uops_1_exc_cause; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_bypassable = ren2_uops_1_bypassable; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_mem_cmd = ren2_uops_1_mem_cmd; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_mem_size = ren2_uops_1_mem_size; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_mem_signed = ren2_uops_1_mem_signed; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_fence = ren2_uops_1_is_fence; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_fencei = ren2_uops_1_is_fencei; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_amo = ren2_uops_1_is_amo; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_uses_ldq = ren2_uops_1_uses_ldq; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_uses_stq = ren2_uops_1_uses_stq; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_sys_pc2epc = ren2_uops_1_is_sys_pc2epc; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_is_unique = ren2_uops_1_is_unique; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_flush_on_commit = ren2_uops_1_flush_on_commit; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_ldst_is_rs1 = ren2_uops_1_ldst_is_rs1; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_ldst = ren2_uops_1_ldst; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_lrs1 = ren2_uops_1_lrs1; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_lrs2 = ren2_uops_1_lrs2; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_lrs3 = ren2_uops_1_lrs3; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_ldst_val = ren2_uops_1_ldst_val; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_dst_rtype = ren2_uops_1_dst_rtype; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_lrs1_rtype = ren2_uops_1_lrs1_rtype; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_lrs2_rtype = ren2_uops_1_lrs2_rtype; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_frs3_en = ren2_uops_1_frs3_en; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_fp_val = ren2_uops_1_fp_val; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_fp_single = ren2_uops_1_fp_single; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_xcpt_pf_if = ren2_uops_1_xcpt_pf_if; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_xcpt_ae_if = ren2_uops_1_xcpt_ae_if; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_xcpt_ma_if = ren2_uops_1_xcpt_ma_if; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_bp_debug_if = ren2_uops_1_bp_debug_if; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_bp_xcpt_if = ren2_uops_1_bp_xcpt_if; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_debug_fsrc = ren2_uops_1_debug_fsrc; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_debug_tsrc = ren2_uops_1_debug_tsrc; // @[rename-stage.scala:108:29, :174:28]
wire [6:0] bypassed_uop_bypassed_uop_1_uopc = ren2_uops_2_uopc; // @[rename-stage.scala:108:29, :174:28]
wire [31:0] bypassed_uop_bypassed_uop_1_inst = ren2_uops_2_inst; // @[rename-stage.scala:108:29, :174:28]
wire [31:0] bypassed_uop_bypassed_uop_1_debug_inst = ren2_uops_2_debug_inst; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_rvc = ren2_uops_2_is_rvc; // @[rename-stage.scala:108:29, :174:28]
wire [39:0] bypassed_uop_bypassed_uop_1_debug_pc = ren2_uops_2_debug_pc; // @[rename-stage.scala:108:29, :174:28]
wire [2:0] bypassed_uop_bypassed_uop_1_iq_type = ren2_uops_2_iq_type; // @[rename-stage.scala:108:29, :174:28]
wire [9:0] bypassed_uop_bypassed_uop_1_fu_code = ren2_uops_2_fu_code; // @[rename-stage.scala:108:29, :174:28]
wire [3:0] bypassed_uop_bypassed_uop_1_ctrl_br_type = ren2_uops_2_ctrl_br_type; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_1_ctrl_op1_sel = ren2_uops_2_ctrl_op1_sel; // @[rename-stage.scala:108:29, :174:28]
wire [2:0] bypassed_uop_bypassed_uop_1_ctrl_op2_sel = ren2_uops_2_ctrl_op2_sel; // @[rename-stage.scala:108:29, :174:28]
wire [2:0] bypassed_uop_bypassed_uop_1_ctrl_imm_sel = ren2_uops_2_ctrl_imm_sel; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_1_ctrl_op_fcn = ren2_uops_2_ctrl_op_fcn; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_ctrl_fcn_dw = ren2_uops_2_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :174:28]
wire [2:0] bypassed_uop_bypassed_uop_1_ctrl_csr_cmd = ren2_uops_2_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_ctrl_is_load = ren2_uops_2_ctrl_is_load; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_ctrl_is_sta = ren2_uops_2_ctrl_is_sta; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_ctrl_is_std = ren2_uops_2_ctrl_is_std; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_1_iw_state = ren2_uops_2_iw_state; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_iw_p1_poisoned = ren2_uops_2_iw_p1_poisoned; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_iw_p2_poisoned = ren2_uops_2_iw_p2_poisoned; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_br = ren2_uops_2_is_br; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_jalr = ren2_uops_2_is_jalr; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_jal = ren2_uops_2_is_jal; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_sfb = ren2_uops_2_is_sfb; // @[rename-stage.scala:108:29, :174:28]
wire [15:0] bypassed_uop_bypassed_uop_1_br_mask = ren2_uops_2_br_mask; // @[rename-stage.scala:108:29, :174:28]
wire [3:0] ren2_br_tags_2_bits = ren2_uops_2_br_tag; // @[rename-stage.scala:108:29, :233:29]
wire [3:0] bypassed_uop_bypassed_uop_1_br_tag = ren2_uops_2_br_tag; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_1_ftq_idx = ren2_uops_2_ftq_idx; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_edge_inst = ren2_uops_2_edge_inst; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_1_pc_lob = ren2_uops_2_pc_lob; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_taken = ren2_uops_2_taken; // @[rename-stage.scala:108:29, :174:28]
wire [19:0] bypassed_uop_bypassed_uop_1_imm_packed = ren2_uops_2_imm_packed; // @[rename-stage.scala:108:29, :174:28]
wire [11:0] bypassed_uop_bypassed_uop_1_csr_addr = ren2_uops_2_csr_addr; // @[rename-stage.scala:108:29, :174:28]
wire [6:0] bypassed_uop_bypassed_uop_1_rob_idx = ren2_uops_2_rob_idx; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_1_ldq_idx = ren2_uops_2_ldq_idx; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_1_stq_idx = ren2_uops_2_stq_idx; // @[rename-stage.scala:108:29, :174:28]
wire [6:0] _ren2_uops_2_pdst_T_2; // @[rename-stage.scala:306:20]
wire [1:0] bypassed_uop_bypassed_uop_1_rxq_idx = ren2_uops_2_rxq_idx; // @[rename-stage.scala:108:29, :174:28]
wire [6:0] bypassed_uop_bypassed_uop_1_pdst = ren2_uops_2_pdst; // @[rename-stage.scala:108:29, :174:28]
wire _ren2_uops_2_prs1_busy_T_1; // @[rename-stage.scala:323:47]
wire [4:0] bypassed_uop_bypassed_uop_1_ppred = ren2_uops_2_ppred; // @[rename-stage.scala:108:29, :174:28]
wire _ren2_uops_2_prs2_busy_T_1; // @[rename-stage.scala:324:47]
wire _ren2_uops_2_prs3_busy_T; // @[rename-stage.scala:325:34]
wire bypassed_uop_bypassed_uop_1_ppred_busy = ren2_uops_2_ppred_busy; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_exception = ren2_uops_2_exception; // @[rename-stage.scala:108:29, :174:28]
wire [63:0] bypassed_uop_bypassed_uop_1_exc_cause = ren2_uops_2_exc_cause; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_bypassable = ren2_uops_2_bypassable; // @[rename-stage.scala:108:29, :174:28]
wire [4:0] bypassed_uop_bypassed_uop_1_mem_cmd = ren2_uops_2_mem_cmd; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_1_mem_size = ren2_uops_2_mem_size; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_mem_signed = ren2_uops_2_mem_signed; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_fence = ren2_uops_2_is_fence; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_fencei = ren2_uops_2_is_fencei; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_amo = ren2_uops_2_is_amo; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_uses_ldq = ren2_uops_2_uses_ldq; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_uses_stq = ren2_uops_2_uses_stq; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_sys_pc2epc = ren2_uops_2_is_sys_pc2epc; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_is_unique = ren2_uops_2_is_unique; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_flush_on_commit = ren2_uops_2_flush_on_commit; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_ldst_is_rs1 = ren2_uops_2_ldst_is_rs1; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_1_ldst = ren2_uops_2_ldst; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_1_lrs1 = ren2_uops_2_lrs1; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_1_lrs2 = ren2_uops_2_lrs2; // @[rename-stage.scala:108:29, :174:28]
wire [5:0] bypassed_uop_bypassed_uop_1_lrs3 = ren2_uops_2_lrs3; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_ldst_val = ren2_uops_2_ldst_val; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_1_dst_rtype = ren2_uops_2_dst_rtype; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_1_lrs1_rtype = ren2_uops_2_lrs1_rtype; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_1_lrs2_rtype = ren2_uops_2_lrs2_rtype; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_frs3_en = ren2_uops_2_frs3_en; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_fp_val = ren2_uops_2_fp_val; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_fp_single = ren2_uops_2_fp_single; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_xcpt_pf_if = ren2_uops_2_xcpt_pf_if; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_xcpt_ae_if = ren2_uops_2_xcpt_ae_if; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_xcpt_ma_if = ren2_uops_2_xcpt_ma_if; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_bp_debug_if = ren2_uops_2_bp_debug_if; // @[rename-stage.scala:108:29, :174:28]
wire bypassed_uop_bypassed_uop_1_bp_xcpt_if = ren2_uops_2_bp_xcpt_if; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_1_debug_fsrc = ren2_uops_2_debug_fsrc; // @[rename-stage.scala:108:29, :174:28]
wire [1:0] bypassed_uop_bypassed_uop_1_debug_tsrc = ren2_uops_2_debug_tsrc; // @[rename-stage.scala:108:29, :174:28]
wire [6:0] ren2_uops_1_prs1; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_1_prs2; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_1_prs3; // @[rename-stage.scala:108:29]
wire ren2_uops_1_prs1_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_1_prs2_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_1_prs3_busy; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_1_stale_pdst; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_2_prs1; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_2_prs2; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_2_prs3; // @[rename-stage.scala:108:29]
wire ren2_uops_2_prs1_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_2_prs2_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_2_prs3_busy; // @[rename-stage.scala:108:29]
wire [6:0] ren2_uops_2_stale_pdst; // @[rename-stage.scala:108:29]
wire _ren2_alloc_reqs_0_T_2; // @[rename-stage.scala:240:88]
wire _ren2_alloc_reqs_1_T_2; // @[rename-stage.scala:240:88]
wire _ren2_alloc_reqs_2_T_2; // @[rename-stage.scala:240:88]
wire ren2_alloc_reqs_0; // @[rename-stage.scala:109:29]
wire ren2_alloc_reqs_1; // @[rename-stage.scala:109:29]
wire ren2_alloc_reqs_2; // @[rename-stage.scala:109:29]
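// Editor's note (inferred from the rename-stage.scala:121-122 annotations): the
// r_valid / r_uop_* registers below form the REN1->REN2 pipeline stage for
// lane 0; their outputs drive the ren2_uops_0_* wires declared above.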
reg r_valid; // @[rename-stage.scala:121:27]
assign ren2_valids_0 = r_valid; // @[rename-stage.scala:107:29, :121:27]
reg [6:0] r_uop_uopc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_uopc = r_uop_uopc; // @[rename-stage.scala:108:29, :122:23]
reg [31:0] r_uop_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_inst = r_uop_inst; // @[rename-stage.scala:108:29, :122:23]
reg [31:0] r_uop_debug_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_debug_inst = r_uop_debug_inst; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_rvc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_rvc = r_uop_is_rvc; // @[rename-stage.scala:108:29, :122:23]
reg [39:0] r_uop_debug_pc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_debug_pc = r_uop_debug_pc; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_iq_type; // @[rename-stage.scala:122:23]
assign ren2_uops_0_iq_type = r_uop_iq_type; // @[rename-stage.scala:108:29, :122:23]
reg [9:0] r_uop_fu_code; // @[rename-stage.scala:122:23]
assign ren2_uops_0_fu_code = r_uop_fu_code; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_ctrl_br_type; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_br_type = r_uop_ctrl_br_type; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_ctrl_op1_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_op1_sel = r_uop_ctrl_op1_sel; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_ctrl_op2_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_op2_sel = r_uop_ctrl_op2_sel; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_ctrl_imm_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_imm_sel = r_uop_ctrl_imm_sel; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_ctrl_op_fcn; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_op_fcn = r_uop_ctrl_op_fcn; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ctrl_fcn_dw; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_fcn_dw = r_uop_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_ctrl_csr_cmd; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_csr_cmd = r_uop_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ctrl_is_load; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_is_load = r_uop_ctrl_is_load; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ctrl_is_sta; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_is_sta = r_uop_ctrl_is_sta; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ctrl_is_std; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_is_std = r_uop_ctrl_is_std; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_iw_state; // @[rename-stage.scala:122:23]
assign ren2_uops_0_iw_state = r_uop_iw_state; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_iw_p1_poisoned; // @[rename-stage.scala:122:23]
assign ren2_uops_0_iw_p1_poisoned = r_uop_iw_p1_poisoned; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_iw_p2_poisoned; // @[rename-stage.scala:122:23]
assign ren2_uops_0_iw_p2_poisoned = r_uop_iw_p2_poisoned; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_br; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_br = r_uop_is_br; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_jalr; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_jalr = r_uop_is_jalr; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_jal; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_jal = r_uop_is_jal; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_sfb; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_sfb = r_uop_is_sfb; // @[rename-stage.scala:108:29, :122:23]
reg [15:0] r_uop_br_mask; // @[rename-stage.scala:122:23]
assign ren2_uops_0_br_mask = r_uop_br_mask; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_br_tag; // @[rename-stage.scala:122:23]
assign ren2_uops_0_br_tag = r_uop_br_tag; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_ftq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ftq_idx = r_uop_ftq_idx; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_edge_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_edge_inst = r_uop_edge_inst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_pc_lob; // @[rename-stage.scala:122:23]
assign ren2_uops_0_pc_lob = r_uop_pc_lob; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_taken; // @[rename-stage.scala:122:23]
assign ren2_uops_0_taken = r_uop_taken; // @[rename-stage.scala:108:29, :122:23]
reg [19:0] r_uop_imm_packed; // @[rename-stage.scala:122:23]
assign ren2_uops_0_imm_packed = r_uop_imm_packed; // @[rename-stage.scala:108:29, :122:23]
reg [11:0] r_uop_csr_addr; // @[rename-stage.scala:122:23]
assign ren2_uops_0_csr_addr = r_uop_csr_addr; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_rob_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_rob_idx = r_uop_rob_idx; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_ldq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ldq_idx = r_uop_ldq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_stq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_stq_idx = r_uop_stq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_rxq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_rxq_idx = r_uop_rxq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_pdst; // @[rename-stage.scala:122:23]
reg [6:0] r_uop_prs1; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs1 = r_uop_prs1; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_prs2; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs2 = r_uop_prs2; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_prs3; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs3 = r_uop_prs3; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_ppred; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ppred = r_uop_ppred; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_prs1_busy; // @[rename-stage.scala:122:23]
reg r_uop_prs2_busy; // @[rename-stage.scala:122:23]
reg r_uop_prs3_busy; // @[rename-stage.scala:122:23]
reg r_uop_ppred_busy; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ppred_busy = r_uop_ppred_busy; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_stale_pdst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_stale_pdst = r_uop_stale_pdst; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_exception; // @[rename-stage.scala:122:23]
assign ren2_uops_0_exception = r_uop_exception; // @[rename-stage.scala:108:29, :122:23]
reg [63:0] r_uop_exc_cause; // @[rename-stage.scala:122:23]
assign ren2_uops_0_exc_cause = r_uop_exc_cause; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_bypassable; // @[rename-stage.scala:122:23]
assign ren2_uops_0_bypassable = r_uop_bypassable; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_mem_cmd; // @[rename-stage.scala:122:23]
assign ren2_uops_0_mem_cmd = r_uop_mem_cmd; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_mem_size; // @[rename-stage.scala:122:23]
assign ren2_uops_0_mem_size = r_uop_mem_size; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_mem_signed; // @[rename-stage.scala:122:23]
assign ren2_uops_0_mem_signed = r_uop_mem_signed; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_fence; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_fence = r_uop_is_fence; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_fencei; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_fencei = r_uop_is_fencei; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_amo; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_amo = r_uop_is_amo; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_uses_ldq; // @[rename-stage.scala:122:23]
assign ren2_uops_0_uses_ldq = r_uop_uses_ldq; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_uses_stq; // @[rename-stage.scala:122:23]
assign ren2_uops_0_uses_stq = r_uop_uses_stq; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_sys_pc2epc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_sys_pc2epc = r_uop_is_sys_pc2epc; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_unique; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_unique = r_uop_is_unique; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_flush_on_commit; // @[rename-stage.scala:122:23]
assign ren2_uops_0_flush_on_commit = r_uop_flush_on_commit; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ldst_is_rs1; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ldst_is_rs1 = r_uop_ldst_is_rs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_ldst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ldst = r_uop_ldst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_lrs1; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs1 = r_uop_lrs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_lrs2; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs2 = r_uop_lrs2; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_lrs3; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs3 = r_uop_lrs3; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ldst_val; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ldst_val = r_uop_ldst_val; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_dst_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_0_dst_rtype = r_uop_dst_rtype; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_lrs1_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs1_rtype = r_uop_lrs1_rtype; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_lrs2_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs2_rtype = r_uop_lrs2_rtype; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_frs3_en; // @[rename-stage.scala:122:23]
assign ren2_uops_0_frs3_en = r_uop_frs3_en; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_fp_val; // @[rename-stage.scala:122:23]
assign ren2_uops_0_fp_val = r_uop_fp_val; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_fp_single; // @[rename-stage.scala:122:23]
assign ren2_uops_0_fp_single = r_uop_fp_single; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_xcpt_pf_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_xcpt_pf_if = r_uop_xcpt_pf_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_xcpt_ae_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_xcpt_ae_if = r_uop_xcpt_ae_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_xcpt_ma_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_xcpt_ma_if = r_uop_xcpt_ma_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_bp_debug_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_bp_debug_if = r_uop_bp_debug_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_bp_xcpt_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_bp_xcpt_if = r_uop_bp_xcpt_if; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_debug_fsrc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_debug_fsrc = r_uop_debug_fsrc; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_debug_tsrc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_debug_tsrc = r_uop_debug_tsrc; // @[rename-stage.scala:108:29, :122:23]
wire [6:0] r_uop_bypassed_uop_uopc = next_uop_uopc; // @[rename-stage.scala:123:24, :174:28]
wire [31:0] r_uop_bypassed_uop_inst = next_uop_inst; // @[rename-stage.scala:123:24, :174:28]
wire [31:0] r_uop_bypassed_uop_debug_inst = next_uop_debug_inst; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_rvc = next_uop_is_rvc; // @[rename-stage.scala:123:24, :174:28]
wire [39:0] r_uop_bypassed_uop_debug_pc = next_uop_debug_pc; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_iq_type = next_uop_iq_type; // @[rename-stage.scala:123:24, :174:28]
wire [9:0] r_uop_bypassed_uop_fu_code = next_uop_fu_code; // @[rename-stage.scala:123:24, :174:28]
wire [3:0] r_uop_bypassed_uop_ctrl_br_type = next_uop_ctrl_br_type; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_ctrl_op1_sel = next_uop_ctrl_op1_sel; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_ctrl_op2_sel = next_uop_ctrl_op2_sel; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_ctrl_imm_sel = next_uop_ctrl_imm_sel; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_ctrl_op_fcn = next_uop_ctrl_op_fcn; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_ctrl_fcn_dw = next_uop_ctrl_fcn_dw; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_ctrl_csr_cmd = next_uop_ctrl_csr_cmd; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_ctrl_is_load = next_uop_ctrl_is_load; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_ctrl_is_sta = next_uop_ctrl_is_sta; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_ctrl_is_std = next_uop_ctrl_is_std; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_iw_state = next_uop_iw_state; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_iw_p1_poisoned = next_uop_iw_p1_poisoned; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_iw_p2_poisoned = next_uop_iw_p2_poisoned; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_br = next_uop_is_br; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_jalr = next_uop_is_jalr; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_jal = next_uop_is_jal; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_sfb = next_uop_is_sfb; // @[rename-stage.scala:123:24, :174:28]
wire [15:0] r_uop_bypassed_uop_br_mask = next_uop_br_mask; // @[rename-stage.scala:123:24, :174:28]
wire [3:0] r_uop_bypassed_uop_br_tag = next_uop_br_tag; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_ftq_idx = next_uop_ftq_idx; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_edge_inst = next_uop_edge_inst; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_pc_lob = next_uop_pc_lob; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_taken = next_uop_taken; // @[rename-stage.scala:123:24, :174:28]
wire [19:0] r_uop_bypassed_uop_imm_packed = next_uop_imm_packed; // @[rename-stage.scala:123:24, :174:28]
wire [11:0] r_uop_bypassed_uop_csr_addr = next_uop_csr_addr; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] r_uop_bypassed_uop_rob_idx = next_uop_rob_idx; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_ldq_idx = next_uop_ldq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_stq_idx = next_uop_stq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_rxq_idx = next_uop_rxq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] r_uop_bypassed_uop_pdst = next_uop_pdst; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_ppred = next_uop_ppred; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_ppred_busy = next_uop_ppred_busy; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_exception = next_uop_exception; // @[rename-stage.scala:123:24, :174:28]
wire [63:0] r_uop_bypassed_uop_exc_cause = next_uop_exc_cause; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_bypassable = next_uop_bypassable; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_mem_cmd = next_uop_mem_cmd; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_mem_size = next_uop_mem_size; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_mem_signed = next_uop_mem_signed; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_fence = next_uop_is_fence; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_fencei = next_uop_is_fencei; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_amo = next_uop_is_amo; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_uses_ldq = next_uop_uses_ldq; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_uses_stq = next_uop_uses_stq; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_sys_pc2epc = next_uop_is_sys_pc2epc; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_is_unique = next_uop_is_unique; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_flush_on_commit = next_uop_flush_on_commit; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_ldst_is_rs1 = next_uop_ldst_is_rs1; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_ldst = next_uop_ldst; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_lrs1 = next_uop_lrs1; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_lrs2 = next_uop_lrs2; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_lrs3 = next_uop_lrs3; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_ldst_val = next_uop_ldst_val; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_dst_rtype = next_uop_dst_rtype; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_lrs1_rtype = next_uop_lrs1_rtype; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_lrs2_rtype = next_uop_lrs2_rtype; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_frs3_en = next_uop_frs3_en; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_fp_val = next_uop_fp_val; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_fp_single = next_uop_fp_single; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_xcpt_pf_if = next_uop_xcpt_pf_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_xcpt_ae_if = next_uop_xcpt_ae_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_xcpt_ma_if = next_uop_xcpt_ma_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_bp_debug_if = next_uop_bp_debug_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_bp_xcpt_if = next_uop_bp_xcpt_if; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_debug_fsrc = next_uop_debug_fsrc; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_debug_tsrc = next_uop_debug_tsrc; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] next_uop_prs1; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_prs2; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_prs3; // @[rename-stage.scala:123:24]
wire next_uop_prs1_busy; // @[rename-stage.scala:123:24]
wire next_uop_prs2_busy; // @[rename-stage.scala:123:24]
wire next_uop_prs3_busy; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_stale_pdst; // @[rename-stage.scala:123:24]
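// Editor's note (inferred from rename-stage.scala:125-131): next_uop_* selects
// between holding the registered uop (_GEN is set on io_kill or when dispatch
// is not ready) and capturing the incoming ren1 lane-0 uop; fields that are not
// yet assigned at REN1 (ctrl signals, csr_addr, rob/ldq/stq/rxq indices, pdst,
// ppred) default to zero on the capture path.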
wire _r_valid_T = ~io_dis_fire_0_0; // @[rename-stage.scala:133:29, :160:7]
wire _r_valid_T_1 = r_valid & _r_valid_T; // @[rename-stage.scala:121:27, :133:{26,29}]
wire _GEN = io_kill_0 | ~io_dis_ready_0; // @[rename-stage.scala:125:14, :127:20, :129:30, :160:7]
assign next_uop_uopc = _GEN ? r_uop_uopc : ren1_uops_0_uopc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_inst = _GEN ? r_uop_inst : ren1_uops_0_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_debug_inst = _GEN ? r_uop_debug_inst : ren1_uops_0_debug_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_rvc = _GEN ? r_uop_is_rvc : ren1_uops_0_is_rvc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_debug_pc = _GEN ? r_uop_debug_pc : ren1_uops_0_debug_pc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_iq_type = _GEN ? r_uop_iq_type : ren1_uops_0_iq_type; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_fu_code = _GEN ? r_uop_fu_code : ren1_uops_0_fu_code; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_br_type = _GEN ? r_uop_ctrl_br_type : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_op1_sel = _GEN ? r_uop_ctrl_op1_sel : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_op2_sel = _GEN ? r_uop_ctrl_op2_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_imm_sel = _GEN ? r_uop_ctrl_imm_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_op_fcn = _GEN ? r_uop_ctrl_op_fcn : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_fcn_dw = _GEN & r_uop_ctrl_fcn_dw; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_csr_cmd = _GEN ? r_uop_ctrl_csr_cmd : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_is_load = _GEN & r_uop_ctrl_is_load; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_is_sta = _GEN & r_uop_ctrl_is_sta; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_is_std = _GEN & r_uop_ctrl_is_std; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_iw_state = _GEN ? r_uop_iw_state : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_iw_p1_poisoned = _GEN & r_uop_iw_p1_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_iw_p2_poisoned = _GEN & r_uop_iw_p2_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_br = _GEN ? r_uop_is_br : ren1_uops_0_is_br; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_jalr = _GEN ? r_uop_is_jalr : ren1_uops_0_is_jalr; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_jal = _GEN ? r_uop_is_jal : ren1_uops_0_is_jal; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_sfb = _GEN ? r_uop_is_sfb : ren1_uops_0_is_sfb; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_br_mask = _GEN ? r_uop_br_mask : ren1_uops_0_br_mask; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_br_tag = _GEN ? r_uop_br_tag : ren1_uops_0_br_tag; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ftq_idx = _GEN ? r_uop_ftq_idx : ren1_uops_0_ftq_idx; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_edge_inst = _GEN ? r_uop_edge_inst : ren1_uops_0_edge_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_pc_lob = _GEN ? r_uop_pc_lob : ren1_uops_0_pc_lob; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_taken = _GEN ? r_uop_taken : ren1_uops_0_taken; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_imm_packed = _GEN ? r_uop_imm_packed : ren1_uops_0_imm_packed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_csr_addr = _GEN ? r_uop_csr_addr : 12'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_rob_idx = _GEN ? r_uop_rob_idx : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ldq_idx = _GEN ? r_uop_ldq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_stq_idx = _GEN ? r_uop_stq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_rxq_idx = _GEN ? r_uop_rxq_idx : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_pdst = _GEN ? r_uop_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs1 = _GEN ? r_uop_prs1 : ren1_uops_0_prs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs2 = _GEN ? r_uop_prs2 : ren1_uops_0_prs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs3 = _GEN ? r_uop_prs3 : ren1_uops_0_prs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ppred = _GEN ? r_uop_ppred : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs1_busy = _GEN & r_uop_prs1_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs2_busy = _GEN & r_uop_prs2_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs3_busy = _GEN & r_uop_prs3_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ppred_busy = _GEN & r_uop_ppred_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_stale_pdst = _GEN ? r_uop_stale_pdst : ren1_uops_0_stale_pdst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_exception = _GEN ? r_uop_exception : ren1_uops_0_exception; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_exc_cause = _GEN ? r_uop_exc_cause : ren1_uops_0_exc_cause; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_bypassable = _GEN ? r_uop_bypassable : ren1_uops_0_bypassable; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_mem_cmd = _GEN ? r_uop_mem_cmd : ren1_uops_0_mem_cmd; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_mem_size = _GEN ? r_uop_mem_size : ren1_uops_0_mem_size; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_mem_signed = _GEN ? r_uop_mem_signed : ren1_uops_0_mem_signed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_fence = _GEN ? r_uop_is_fence : ren1_uops_0_is_fence; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_fencei = _GEN ? r_uop_is_fencei : ren1_uops_0_is_fencei; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_amo = _GEN ? r_uop_is_amo : ren1_uops_0_is_amo; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_uses_ldq = _GEN ? r_uop_uses_ldq : ren1_uops_0_uses_ldq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_uses_stq = _GEN ? r_uop_uses_stq : ren1_uops_0_uses_stq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_sys_pc2epc = _GEN ? r_uop_is_sys_pc2epc : ren1_uops_0_is_sys_pc2epc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_unique = _GEN ? r_uop_is_unique : ren1_uops_0_is_unique; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_flush_on_commit = _GEN ? r_uop_flush_on_commit : ren1_uops_0_flush_on_commit; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ldst_is_rs1 = _GEN & r_uop_ldst_is_rs1; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ldst = _GEN ? r_uop_ldst : ren1_uops_0_ldst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs1 = _GEN ? r_uop_lrs1 : ren1_uops_0_lrs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs2 = _GEN ? r_uop_lrs2 : ren1_uops_0_lrs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs3 = _GEN ? r_uop_lrs3 : ren1_uops_0_lrs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ldst_val = _GEN ? r_uop_ldst_val : ren1_uops_0_ldst_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_dst_rtype = _GEN ? r_uop_dst_rtype : ren1_uops_0_dst_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs1_rtype = _GEN ? r_uop_lrs1_rtype : ren1_uops_0_lrs1_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs2_rtype = _GEN ? r_uop_lrs2_rtype : ren1_uops_0_lrs2_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_frs3_en = _GEN ? r_uop_frs3_en : ren1_uops_0_frs3_en; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_fp_val = _GEN ? r_uop_fp_val : ren1_uops_0_fp_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_fp_single = _GEN ? r_uop_fp_single : ren1_uops_0_fp_single; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_xcpt_pf_if = _GEN ? r_uop_xcpt_pf_if : ren1_uops_0_xcpt_pf_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_xcpt_ae_if = _GEN ? r_uop_xcpt_ae_if : ren1_uops_0_xcpt_ae_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_xcpt_ma_if = _GEN & r_uop_xcpt_ma_if; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_bp_debug_if = _GEN ? r_uop_bp_debug_if : ren1_uops_0_bp_debug_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_bp_xcpt_if = _GEN ? r_uop_bp_xcpt_if : ren1_uops_0_bp_xcpt_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_debug_fsrc = _GEN ? r_uop_debug_fsrc : ren1_uops_0_debug_fsrc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_debug_tsrc = _GEN ? r_uop_debug_tsrc : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
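// Editor's note: the r_uop_newuop_* wires below mirror the bypassed next-uop
// fields; util.scala:73 appears to correspond to the WireInit copy inside
// GetNewUopAndBrMask in BOOM's util.scala, which is why br_mask is not copied
// here -- it is recomputed against the branch-update resolve mask instead.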
wire [6:0] r_uop_newuop_uopc = r_uop_bypassed_uop_uopc; // @[util.scala:73:26]
wire [31:0] r_uop_newuop_inst = r_uop_bypassed_uop_inst; // @[util.scala:73:26]
wire [31:0] r_uop_newuop_debug_inst = r_uop_bypassed_uop_debug_inst; // @[util.scala:73:26]
wire r_uop_newuop_is_rvc = r_uop_bypassed_uop_is_rvc; // @[util.scala:73:26]
wire [39:0] r_uop_newuop_debug_pc = r_uop_bypassed_uop_debug_pc; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_iq_type = r_uop_bypassed_uop_iq_type; // @[util.scala:73:26]
wire [9:0] r_uop_newuop_fu_code = r_uop_bypassed_uop_fu_code; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_ctrl_br_type = r_uop_bypassed_uop_ctrl_br_type; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_ctrl_op1_sel = r_uop_bypassed_uop_ctrl_op1_sel; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_ctrl_op2_sel = r_uop_bypassed_uop_ctrl_op2_sel; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_ctrl_imm_sel = r_uop_bypassed_uop_ctrl_imm_sel; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_ctrl_op_fcn = r_uop_bypassed_uop_ctrl_op_fcn; // @[util.scala:73:26]
wire r_uop_newuop_ctrl_fcn_dw = r_uop_bypassed_uop_ctrl_fcn_dw; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_ctrl_csr_cmd = r_uop_bypassed_uop_ctrl_csr_cmd; // @[util.scala:73:26]
wire r_uop_newuop_ctrl_is_load = r_uop_bypassed_uop_ctrl_is_load; // @[util.scala:73:26]
wire r_uop_newuop_ctrl_is_sta = r_uop_bypassed_uop_ctrl_is_sta; // @[util.scala:73:26]
wire r_uop_newuop_ctrl_is_std = r_uop_bypassed_uop_ctrl_is_std; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_iw_state = r_uop_bypassed_uop_iw_state; // @[util.scala:73:26]
wire r_uop_newuop_iw_p1_poisoned = r_uop_bypassed_uop_iw_p1_poisoned; // @[util.scala:73:26]
wire r_uop_newuop_iw_p2_poisoned = r_uop_bypassed_uop_iw_p2_poisoned; // @[util.scala:73:26]
wire r_uop_newuop_is_br = r_uop_bypassed_uop_is_br; // @[util.scala:73:26]
wire r_uop_newuop_is_jalr = r_uop_bypassed_uop_is_jalr; // @[util.scala:73:26]
wire r_uop_newuop_is_jal = r_uop_bypassed_uop_is_jal; // @[util.scala:73:26]
wire r_uop_newuop_is_sfb = r_uop_bypassed_uop_is_sfb; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_br_tag = r_uop_bypassed_uop_br_tag; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_ftq_idx = r_uop_bypassed_uop_ftq_idx; // @[util.scala:73:26]
wire r_uop_newuop_edge_inst = r_uop_bypassed_uop_edge_inst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_pc_lob = r_uop_bypassed_uop_pc_lob; // @[util.scala:73:26]
wire r_uop_newuop_taken = r_uop_bypassed_uop_taken; // @[util.scala:73:26]
wire [19:0] r_uop_newuop_imm_packed = r_uop_bypassed_uop_imm_packed; // @[util.scala:73:26]
wire [11:0] r_uop_newuop_csr_addr = r_uop_bypassed_uop_csr_addr; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_rob_idx = r_uop_bypassed_uop_rob_idx; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_ldq_idx = r_uop_bypassed_uop_ldq_idx; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_stq_idx = r_uop_bypassed_uop_stq_idx; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_rxq_idx = r_uop_bypassed_uop_rxq_idx; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_pdst = r_uop_bypassed_uop_pdst; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_prs1 = r_uop_bypassed_uop_prs1; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_prs2 = r_uop_bypassed_uop_prs2; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_prs3 = r_uop_bypassed_uop_prs3; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs1_busy_T; // @[rename-stage.scala:199:45]
wire [4:0] r_uop_newuop_ppred = r_uop_bypassed_uop_ppred; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs2_busy_T; // @[rename-stage.scala:200:45]
wire r_uop_newuop_prs1_busy = r_uop_bypassed_uop_prs1_busy; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs3_busy_T; // @[rename-stage.scala:201:45]
wire r_uop_newuop_prs2_busy = r_uop_bypassed_uop_prs2_busy; // @[util.scala:73:26]
wire r_uop_newuop_prs3_busy = r_uop_bypassed_uop_prs3_busy; // @[util.scala:73:26]
wire r_uop_newuop_ppred_busy = r_uop_bypassed_uop_ppred_busy; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_stale_pdst = r_uop_bypassed_uop_stale_pdst; // @[util.scala:73:26]
wire r_uop_newuop_exception = r_uop_bypassed_uop_exception; // @[util.scala:73:26]
wire [63:0] r_uop_newuop_exc_cause = r_uop_bypassed_uop_exc_cause; // @[util.scala:73:26]
wire r_uop_newuop_bypassable = r_uop_bypassed_uop_bypassable; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_mem_cmd = r_uop_bypassed_uop_mem_cmd; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_mem_size = r_uop_bypassed_uop_mem_size; // @[util.scala:73:26]
wire r_uop_newuop_mem_signed = r_uop_bypassed_uop_mem_signed; // @[util.scala:73:26]
wire r_uop_newuop_is_fence = r_uop_bypassed_uop_is_fence; // @[util.scala:73:26]
wire r_uop_newuop_is_fencei = r_uop_bypassed_uop_is_fencei; // @[util.scala:73:26]
wire r_uop_newuop_is_amo = r_uop_bypassed_uop_is_amo; // @[util.scala:73:26]
wire r_uop_newuop_uses_ldq = r_uop_bypassed_uop_uses_ldq; // @[util.scala:73:26]
wire r_uop_newuop_uses_stq = r_uop_bypassed_uop_uses_stq; // @[util.scala:73:26]
wire r_uop_newuop_is_sys_pc2epc = r_uop_bypassed_uop_is_sys_pc2epc; // @[util.scala:73:26]
wire r_uop_newuop_is_unique = r_uop_bypassed_uop_is_unique; // @[util.scala:73:26]
wire r_uop_newuop_flush_on_commit = r_uop_bypassed_uop_flush_on_commit; // @[util.scala:73:26]
wire r_uop_newuop_ldst_is_rs1 = r_uop_bypassed_uop_ldst_is_rs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_ldst = r_uop_bypassed_uop_ldst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_lrs1 = r_uop_bypassed_uop_lrs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_lrs2 = r_uop_bypassed_uop_lrs2; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_lrs3 = r_uop_bypassed_uop_lrs3; // @[util.scala:73:26]
wire r_uop_newuop_ldst_val = r_uop_bypassed_uop_ldst_val; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_dst_rtype = r_uop_bypassed_uop_dst_rtype; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_lrs1_rtype = r_uop_bypassed_uop_lrs1_rtype; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_lrs2_rtype = r_uop_bypassed_uop_lrs2_rtype; // @[util.scala:73:26]
wire r_uop_newuop_frs3_en = r_uop_bypassed_uop_frs3_en; // @[util.scala:73:26]
wire r_uop_newuop_fp_val = r_uop_bypassed_uop_fp_val; // @[util.scala:73:26]
wire r_uop_newuop_fp_single = r_uop_bypassed_uop_fp_single; // @[util.scala:73:26]
wire r_uop_newuop_xcpt_pf_if = r_uop_bypassed_uop_xcpt_pf_if; // @[util.scala:73:26]
wire r_uop_newuop_xcpt_ae_if = r_uop_bypassed_uop_xcpt_ae_if; // @[util.scala:73:26]
wire r_uop_newuop_xcpt_ma_if = r_uop_bypassed_uop_xcpt_ma_if; // @[util.scala:73:26]
wire r_uop_newuop_bp_debug_if = r_uop_bypassed_uop_bp_debug_if; // @[util.scala:73:26]
wire r_uop_newuop_bp_xcpt_if = r_uop_bypassed_uop_bp_xcpt_if; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_debug_fsrc = r_uop_bypassed_uop_debug_fsrc; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_debug_tsrc = r_uop_bypassed_uop_debug_tsrc; // @[util.scala:73:26]
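// Editor's note (rename-stage.scala:177-180): intra-group bypass detection --
// each lane being allocated this cycle (ren2_alloc_reqs_i) compares its logical
// destination (ren2_uops_i_ldst) against this uop's logical sources
// lrs1/lrs2/lrs3 and its own destination ldst.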
wire _r_uop_bypass_hits_rs1_T = ren2_uops_0_ldst == next_uop_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_0 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs1_T; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs1_T_1 = ren2_uops_1_ldst == next_uop_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_1 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs1_T_1; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs1_T_2 = ren2_uops_2_ldst == next_uop_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_2 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs1_T_2; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs2_T = ren2_uops_0_ldst == next_uop_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_0 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs2_T; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs2_T_1 = ren2_uops_1_ldst == next_uop_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_1 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs2_T_1; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs2_T_2 = ren2_uops_2_ldst == next_uop_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_2 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs2_T_2; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs3_T = ren2_uops_0_ldst == next_uop_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_0 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs3_T; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_rs3_T_1 = ren2_uops_1_ldst == next_uop_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_1 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs3_T_1; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_rs3_T_2 = ren2_uops_2_ldst == next_uop_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_2 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs3_T_2; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_dst_T = ren2_uops_0_ldst == next_uop_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_0 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_dst_T; // @[rename-stage.scala:109:29, :180:{77,87}]
wire _r_uop_bypass_hits_dst_T_1 = ren2_uops_1_ldst == next_uop_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_1 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_dst_T_1; // @[rename-stage.scala:109:29, :180:{77,87}]
wire _r_uop_bypass_hits_dst_T_2 = ren2_uops_2_ldst == next_uop_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_2 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_dst_T_2; // @[rename-stage.scala:109:29, :180:{77,87}]
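// Priority-encode the bypass hits into one-hot selects; the highest-indexed
// (youngest) matching ren2 slot takes priority.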
wire [2:0] _r_uop_bypass_sel_rs1_enc_T = {r_uop_bypass_hits_rs1_0, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs1_enc_T_1 = r_uop_bypass_hits_rs1_1 ? 3'h2 : _r_uop_bypass_sel_rs1_enc_T; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs1_enc = r_uop_bypass_hits_rs1_2 ? 3'h1 : _r_uop_bypass_sel_rs1_enc_T_1; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs1_2 = r_uop_bypass_sel_rs1_enc[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs1_1 = r_uop_bypass_sel_rs1_enc[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs1_0 = r_uop_bypass_sel_rs1_enc[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_rs2_enc_T = {r_uop_bypass_hits_rs2_0, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs2_enc_T_1 = r_uop_bypass_hits_rs2_1 ? 3'h2 : _r_uop_bypass_sel_rs2_enc_T; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs2_enc = r_uop_bypass_hits_rs2_2 ? 3'h1 : _r_uop_bypass_sel_rs2_enc_T_1; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs2_2 = r_uop_bypass_sel_rs2_enc[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs2_1 = r_uop_bypass_sel_rs2_enc[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs2_0 = r_uop_bypass_sel_rs2_enc[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_rs3_enc_T = {r_uop_bypass_hits_rs3_0, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs3_enc_T_1 = r_uop_bypass_hits_rs3_1 ? 3'h2 : _r_uop_bypass_sel_rs3_enc_T; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs3_enc = r_uop_bypass_hits_rs3_2 ? 3'h1 : _r_uop_bypass_sel_rs3_enc_T_1; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs3_2 = r_uop_bypass_sel_rs3_enc[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs3_1 = r_uop_bypass_sel_rs3_enc[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs3_0 = r_uop_bypass_sel_rs3_enc[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_dst_enc_T = {r_uop_bypass_hits_dst_0, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_dst_enc_T_1 = r_uop_bypass_hits_dst_1 ? 3'h2 : _r_uop_bypass_sel_dst_enc_T; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_dst_enc = r_uop_bypass_hits_dst_2 ? 3'h1 : _r_uop_bypass_sel_dst_enc_T_1; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_dst_2 = r_uop_bypass_sel_dst_enc[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_dst_1 = r_uop_bypass_sel_dst_enc[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_dst_0 = r_uop_bypass_sel_dst_enc[2]; // @[OneHot.scala:83:30]
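// Any source (or destination) hit means the corresponding physical register must be
// bypassed from a freshly allocated pdst instead of the map-table value.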
wire _r_uop_do_bypass_rs1_T = r_uop_bypass_hits_rs1_0 | r_uop_bypass_hits_rs1_1; // @[rename-stage.scala:177:77, :187:49]
wire r_uop_do_bypass_rs1 = _r_uop_do_bypass_rs1_T | r_uop_bypass_hits_rs1_2; // @[rename-stage.scala:177:77, :187:49]
wire _r_uop_do_bypass_rs2_T = r_uop_bypass_hits_rs2_0 | r_uop_bypass_hits_rs2_1; // @[rename-stage.scala:178:77, :188:49]
wire r_uop_do_bypass_rs2 = _r_uop_do_bypass_rs2_T | r_uop_bypass_hits_rs2_2; // @[rename-stage.scala:178:77, :188:49]
wire _r_uop_do_bypass_rs3_T = r_uop_bypass_hits_rs3_0 | r_uop_bypass_hits_rs3_1; // @[rename-stage.scala:179:77, :189:49]
wire r_uop_do_bypass_rs3 = _r_uop_do_bypass_rs3_T | r_uop_bypass_hits_rs3_2; // @[rename-stage.scala:179:77, :189:49]
wire _r_uop_do_bypass_dst_T = r_uop_bypass_hits_dst_0 | r_uop_bypass_hits_dst_1; // @[rename-stage.scala:180:77, :190:49]
wire r_uop_do_bypass_dst = _r_uop_do_bypass_dst_T | r_uop_bypass_hits_dst_2; // @[rename-stage.scala:180:77, :190:49]
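// One-hot mux of the selected ren2 slot's pdst; fall back to the renamed value
// carried in next_uop_* when no bypass applies.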
wire [6:0] _r_uop_bypassed_uop_prs1_T = r_uop_bypass_sel_rs1_0 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_1 = r_uop_bypass_sel_rs1_1 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_2 = r_uop_bypass_sel_rs1_2 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_3 = _r_uop_bypassed_uop_prs1_T | _r_uop_bypassed_uop_prs1_T_1; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs1_T_4 = _r_uop_bypassed_uop_prs1_T_3 | _r_uop_bypassed_uop_prs1_T_2; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs1_WIRE = _r_uop_bypassed_uop_prs1_T_4; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_prs1 = r_uop_do_bypass_rs1 ? _r_uop_bypassed_uop_prs1_WIRE : next_uop_prs1; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_T = r_uop_bypass_sel_rs2_0 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_1 = r_uop_bypass_sel_rs2_1 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_2 = r_uop_bypass_sel_rs2_2 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_3 = _r_uop_bypassed_uop_prs2_T | _r_uop_bypassed_uop_prs2_T_1; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_T_4 = _r_uop_bypassed_uop_prs2_T_3 | _r_uop_bypassed_uop_prs2_T_2; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_WIRE = _r_uop_bypassed_uop_prs2_T_4; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_prs2 = r_uop_do_bypass_rs2 ? _r_uop_bypassed_uop_prs2_WIRE : next_uop_prs2; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_T = r_uop_bypass_sel_rs3_0 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_1 = r_uop_bypass_sel_rs3_1 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_2 = r_uop_bypass_sel_rs3_2 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_3 = _r_uop_bypassed_uop_prs3_T | _r_uop_bypassed_uop_prs3_T_1; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_T_4 = _r_uop_bypassed_uop_prs3_T_3 | _r_uop_bypassed_uop_prs3_T_2; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_WIRE = _r_uop_bypassed_uop_prs3_T_4; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_prs3 = r_uop_do_bypass_rs3 ? _r_uop_bypassed_uop_prs3_WIRE : next_uop_prs3; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T = r_uop_bypass_sel_dst_0 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_1 = r_uop_bypass_sel_dst_1 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_2 = r_uop_bypass_sel_dst_2 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_3 = _r_uop_bypassed_uop_stale_pdst_T | _r_uop_bypassed_uop_stale_pdst_T_1; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_4 = _r_uop_bypassed_uop_stale_pdst_T_3 | _r_uop_bypassed_uop_stale_pdst_T_2; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_WIRE = _r_uop_bypassed_uop_stale_pdst_T_4; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_stale_pdst = r_uop_do_bypass_dst ? _r_uop_bypassed_uop_stale_pdst_WIRE : next_uop_stale_pdst; // @[Mux.scala:30:73]
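// A bypassed operand is necessarily still busy: its producer was only allocated a
// pdst this cycle and has not written back yet.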
assign _r_uop_bypassed_uop_prs1_busy_T = next_uop_prs1_busy | r_uop_do_bypass_rs1; // @[rename-stage.scala:123:24, :187:49, :199:45]
assign r_uop_bypassed_uop_prs1_busy = _r_uop_bypassed_uop_prs1_busy_T; // @[rename-stage.scala:174:28, :199:45]
assign _r_uop_bypassed_uop_prs2_busy_T = next_uop_prs2_busy | r_uop_do_bypass_rs2; // @[rename-stage.scala:123:24, :188:49, :200:45]
assign r_uop_bypassed_uop_prs2_busy = _r_uop_bypassed_uop_prs2_busy_T; // @[rename-stage.scala:174:28, :200:45]
assign _r_uop_bypassed_uop_prs3_busy_T = next_uop_prs3_busy | r_uop_do_bypass_rs3; // @[rename-stage.scala:123:24, :189:49, :201:45]
assign r_uop_bypassed_uop_prs3_busy = _r_uop_bypassed_uop_prs3_busy_T; // @[rename-stage.scala:174:28, :201:45]
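// Drop branch-mask bits for branches resolved this cycle before the uop is latched.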
wire [15:0] _r_uop_newuop_br_mask_T_1; // @[util.scala:74:35]
wire [15:0] r_uop_newuop_br_mask; // @[util.scala:73:26]
wire [15:0] _r_uop_newuop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37]
assign _r_uop_newuop_br_mask_T_1 = r_uop_bypassed_uop_br_mask & _r_uop_newuop_br_mask_T; // @[util.scala:74:{35,37}]
assign r_uop_newuop_br_mask = _r_uop_newuop_br_mask_T_1; // @[util.scala:73:26, :74:35]
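// Rename pipeline registers for uop slot 1 (same structure as slot 0 above).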
reg r_valid_1; // @[rename-stage.scala:121:27]
assign ren2_valids_1 = r_valid_1; // @[rename-stage.scala:107:29, :121:27]
reg [6:0] r_uop_1_uopc; // @[rename-stage.scala:122:23]
assign ren2_uops_1_uopc = r_uop_1_uopc; // @[rename-stage.scala:108:29, :122:23]
reg [31:0] r_uop_1_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_1_inst = r_uop_1_inst; // @[rename-stage.scala:108:29, :122:23]
reg [31:0] r_uop_1_debug_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_1_debug_inst = r_uop_1_debug_inst; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_rvc; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_rvc = r_uop_1_is_rvc; // @[rename-stage.scala:108:29, :122:23]
reg [39:0] r_uop_1_debug_pc; // @[rename-stage.scala:122:23]
assign ren2_uops_1_debug_pc = r_uop_1_debug_pc; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_1_iq_type; // @[rename-stage.scala:122:23]
assign ren2_uops_1_iq_type = r_uop_1_iq_type; // @[rename-stage.scala:108:29, :122:23]
reg [9:0] r_uop_1_fu_code; // @[rename-stage.scala:122:23]
assign ren2_uops_1_fu_code = r_uop_1_fu_code; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_1_ctrl_br_type; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_br_type = r_uop_1_ctrl_br_type; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_ctrl_op1_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_op1_sel = r_uop_1_ctrl_op1_sel; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_1_ctrl_op2_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_op2_sel = r_uop_1_ctrl_op2_sel; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_1_ctrl_imm_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_imm_sel = r_uop_1_ctrl_imm_sel; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_1_ctrl_op_fcn; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_op_fcn = r_uop_1_ctrl_op_fcn; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_fcn_dw = r_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_1_ctrl_csr_cmd; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_csr_cmd = r_uop_1_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_ctrl_is_load; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_is_load = r_uop_1_ctrl_is_load; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_ctrl_is_sta; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_is_sta = r_uop_1_ctrl_is_sta; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_ctrl_is_std; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ctrl_is_std = r_uop_1_ctrl_is_std; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_iw_state; // @[rename-stage.scala:122:23]
assign ren2_uops_1_iw_state = r_uop_1_iw_state; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_iw_p1_poisoned; // @[rename-stage.scala:122:23]
assign ren2_uops_1_iw_p1_poisoned = r_uop_1_iw_p1_poisoned; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_iw_p2_poisoned; // @[rename-stage.scala:122:23]
assign ren2_uops_1_iw_p2_poisoned = r_uop_1_iw_p2_poisoned; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_br; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_br = r_uop_1_is_br; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_jalr; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_jalr = r_uop_1_is_jalr; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_jal; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_jal = r_uop_1_is_jal; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_sfb; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_sfb = r_uop_1_is_sfb; // @[rename-stage.scala:108:29, :122:23]
reg [15:0] r_uop_1_br_mask; // @[rename-stage.scala:122:23]
assign ren2_uops_1_br_mask = r_uop_1_br_mask; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_1_br_tag; // @[rename-stage.scala:122:23]
assign ren2_uops_1_br_tag = r_uop_1_br_tag; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_1_ftq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ftq_idx = r_uop_1_ftq_idx; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_edge_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_1_edge_inst = r_uop_1_edge_inst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_1_pc_lob; // @[rename-stage.scala:122:23]
assign ren2_uops_1_pc_lob = r_uop_1_pc_lob; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_taken; // @[rename-stage.scala:122:23]
assign ren2_uops_1_taken = r_uop_1_taken; // @[rename-stage.scala:108:29, :122:23]
reg [19:0] r_uop_1_imm_packed; // @[rename-stage.scala:122:23]
assign ren2_uops_1_imm_packed = r_uop_1_imm_packed; // @[rename-stage.scala:108:29, :122:23]
reg [11:0] r_uop_1_csr_addr; // @[rename-stage.scala:122:23]
assign ren2_uops_1_csr_addr = r_uop_1_csr_addr; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_1_rob_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_1_rob_idx = r_uop_1_rob_idx; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_1_ldq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ldq_idx = r_uop_1_ldq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_1_stq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_1_stq_idx = r_uop_1_stq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_rxq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_1_rxq_idx = r_uop_1_rxq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_1_pdst; // @[rename-stage.scala:122:23]
reg [6:0] r_uop_1_prs1; // @[rename-stage.scala:122:23]
assign ren2_uops_1_prs1 = r_uop_1_prs1; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_1_prs2; // @[rename-stage.scala:122:23]
assign ren2_uops_1_prs2 = r_uop_1_prs2; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_1_prs3; // @[rename-stage.scala:122:23]
assign ren2_uops_1_prs3 = r_uop_1_prs3; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_1_ppred; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ppred = r_uop_1_ppred; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_prs1_busy; // @[rename-stage.scala:122:23]
reg r_uop_1_prs2_busy; // @[rename-stage.scala:122:23]
reg r_uop_1_prs3_busy; // @[rename-stage.scala:122:23]
reg r_uop_1_ppred_busy; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ppred_busy = r_uop_1_ppred_busy; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_1_stale_pdst; // @[rename-stage.scala:122:23]
assign ren2_uops_1_stale_pdst = r_uop_1_stale_pdst; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_exception; // @[rename-stage.scala:122:23]
assign ren2_uops_1_exception = r_uop_1_exception; // @[rename-stage.scala:108:29, :122:23]
reg [63:0] r_uop_1_exc_cause; // @[rename-stage.scala:122:23]
assign ren2_uops_1_exc_cause = r_uop_1_exc_cause; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_bypassable; // @[rename-stage.scala:122:23]
assign ren2_uops_1_bypassable = r_uop_1_bypassable; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_1_mem_cmd; // @[rename-stage.scala:122:23]
assign ren2_uops_1_mem_cmd = r_uop_1_mem_cmd; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_mem_size; // @[rename-stage.scala:122:23]
assign ren2_uops_1_mem_size = r_uop_1_mem_size; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_mem_signed; // @[rename-stage.scala:122:23]
assign ren2_uops_1_mem_signed = r_uop_1_mem_signed; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_fence; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_fence = r_uop_1_is_fence; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_fencei; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_fencei = r_uop_1_is_fencei; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_amo; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_amo = r_uop_1_is_amo; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_uses_ldq; // @[rename-stage.scala:122:23]
assign ren2_uops_1_uses_ldq = r_uop_1_uses_ldq; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_uses_stq; // @[rename-stage.scala:122:23]
assign ren2_uops_1_uses_stq = r_uop_1_uses_stq; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_sys_pc2epc; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_sys_pc2epc = r_uop_1_is_sys_pc2epc; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_is_unique; // @[rename-stage.scala:122:23]
assign ren2_uops_1_is_unique = r_uop_1_is_unique; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_flush_on_commit; // @[rename-stage.scala:122:23]
assign ren2_uops_1_flush_on_commit = r_uop_1_flush_on_commit; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_ldst_is_rs1; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ldst_is_rs1 = r_uop_1_ldst_is_rs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_1_ldst; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ldst = r_uop_1_ldst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_1_lrs1; // @[rename-stage.scala:122:23]
assign ren2_uops_1_lrs1 = r_uop_1_lrs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_1_lrs2; // @[rename-stage.scala:122:23]
assign ren2_uops_1_lrs2 = r_uop_1_lrs2; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_1_lrs3; // @[rename-stage.scala:122:23]
assign ren2_uops_1_lrs3 = r_uop_1_lrs3; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_ldst_val; // @[rename-stage.scala:122:23]
assign ren2_uops_1_ldst_val = r_uop_1_ldst_val; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_dst_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_1_dst_rtype = r_uop_1_dst_rtype; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_lrs1_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_1_lrs1_rtype = r_uop_1_lrs1_rtype; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_lrs2_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_1_lrs2_rtype = r_uop_1_lrs2_rtype; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_frs3_en; // @[rename-stage.scala:122:23]
assign ren2_uops_1_frs3_en = r_uop_1_frs3_en; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_fp_val; // @[rename-stage.scala:122:23]
assign ren2_uops_1_fp_val = r_uop_1_fp_val; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_fp_single; // @[rename-stage.scala:122:23]
assign ren2_uops_1_fp_single = r_uop_1_fp_single; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_xcpt_pf_if; // @[rename-stage.scala:122:23]
assign ren2_uops_1_xcpt_pf_if = r_uop_1_xcpt_pf_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_xcpt_ae_if; // @[rename-stage.scala:122:23]
assign ren2_uops_1_xcpt_ae_if = r_uop_1_xcpt_ae_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_xcpt_ma_if; // @[rename-stage.scala:122:23]
assign ren2_uops_1_xcpt_ma_if = r_uop_1_xcpt_ma_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_bp_debug_if; // @[rename-stage.scala:122:23]
assign ren2_uops_1_bp_debug_if = r_uop_1_bp_debug_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_1_bp_xcpt_if; // @[rename-stage.scala:122:23]
assign ren2_uops_1_bp_xcpt_if = r_uop_1_bp_xcpt_if; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_debug_fsrc; // @[rename-stage.scala:122:23]
assign ren2_uops_1_debug_fsrc = r_uop_1_debug_fsrc; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_1_debug_tsrc; // @[rename-stage.scala:122:23]
assign ren2_uops_1_debug_tsrc = r_uop_1_debug_tsrc; // @[rename-stage.scala:108:29, :122:23]
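// Default the slot-1 bypassed uop to next_uop_1; only prs1/prs2/prs3, stale_pdst
// and the source-busy bits are overridden by the bypass network below.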
wire [6:0] r_uop_bypassed_uop_1_uopc = next_uop_1_uopc; // @[rename-stage.scala:123:24, :174:28]
wire [31:0] r_uop_bypassed_uop_1_inst = next_uop_1_inst; // @[rename-stage.scala:123:24, :174:28]
wire [31:0] r_uop_bypassed_uop_1_debug_inst = next_uop_1_debug_inst; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_rvc = next_uop_1_is_rvc; // @[rename-stage.scala:123:24, :174:28]
wire [39:0] r_uop_bypassed_uop_1_debug_pc = next_uop_1_debug_pc; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_1_iq_type = next_uop_1_iq_type; // @[rename-stage.scala:123:24, :174:28]
wire [9:0] r_uop_bypassed_uop_1_fu_code = next_uop_1_fu_code; // @[rename-stage.scala:123:24, :174:28]
wire [3:0] r_uop_bypassed_uop_1_ctrl_br_type = next_uop_1_ctrl_br_type; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_ctrl_op1_sel = next_uop_1_ctrl_op1_sel; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_1_ctrl_op2_sel = next_uop_1_ctrl_op2_sel; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_1_ctrl_imm_sel = next_uop_1_ctrl_imm_sel; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_1_ctrl_op_fcn = next_uop_1_ctrl_op_fcn; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_ctrl_fcn_dw = next_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_1_ctrl_csr_cmd = next_uop_1_ctrl_csr_cmd; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_ctrl_is_load = next_uop_1_ctrl_is_load; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_ctrl_is_sta = next_uop_1_ctrl_is_sta; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_ctrl_is_std = next_uop_1_ctrl_is_std; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_iw_state = next_uop_1_iw_state; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_iw_p1_poisoned = next_uop_1_iw_p1_poisoned; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_iw_p2_poisoned = next_uop_1_iw_p2_poisoned; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_br = next_uop_1_is_br; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_jalr = next_uop_1_is_jalr; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_jal = next_uop_1_is_jal; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_sfb = next_uop_1_is_sfb; // @[rename-stage.scala:123:24, :174:28]
wire [15:0] r_uop_bypassed_uop_1_br_mask = next_uop_1_br_mask; // @[rename-stage.scala:123:24, :174:28]
wire [3:0] r_uop_bypassed_uop_1_br_tag = next_uop_1_br_tag; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_1_ftq_idx = next_uop_1_ftq_idx; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_edge_inst = next_uop_1_edge_inst; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_1_pc_lob = next_uop_1_pc_lob; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_taken = next_uop_1_taken; // @[rename-stage.scala:123:24, :174:28]
wire [19:0] r_uop_bypassed_uop_1_imm_packed = next_uop_1_imm_packed; // @[rename-stage.scala:123:24, :174:28]
wire [11:0] r_uop_bypassed_uop_1_csr_addr = next_uop_1_csr_addr; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] r_uop_bypassed_uop_1_rob_idx = next_uop_1_rob_idx; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_1_ldq_idx = next_uop_1_ldq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_1_stq_idx = next_uop_1_stq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_rxq_idx = next_uop_1_rxq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] r_uop_bypassed_uop_1_pdst = next_uop_1_pdst; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_1_ppred = next_uop_1_ppred; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_ppred_busy = next_uop_1_ppred_busy; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_exception = next_uop_1_exception; // @[rename-stage.scala:123:24, :174:28]
wire [63:0] r_uop_bypassed_uop_1_exc_cause = next_uop_1_exc_cause; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_bypassable = next_uop_1_bypassable; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_1_mem_cmd = next_uop_1_mem_cmd; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_mem_size = next_uop_1_mem_size; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_mem_signed = next_uop_1_mem_signed; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_fence = next_uop_1_is_fence; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_fencei = next_uop_1_is_fencei; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_amo = next_uop_1_is_amo; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_uses_ldq = next_uop_1_uses_ldq; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_uses_stq = next_uop_1_uses_stq; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_sys_pc2epc = next_uop_1_is_sys_pc2epc; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_is_unique = next_uop_1_is_unique; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_flush_on_commit = next_uop_1_flush_on_commit; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_ldst_is_rs1 = next_uop_1_ldst_is_rs1; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_1_ldst = next_uop_1_ldst; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_1_lrs1 = next_uop_1_lrs1; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_1_lrs2 = next_uop_1_lrs2; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_1_lrs3 = next_uop_1_lrs3; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_ldst_val = next_uop_1_ldst_val; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_dst_rtype = next_uop_1_dst_rtype; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_lrs1_rtype = next_uop_1_lrs1_rtype; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_lrs2_rtype = next_uop_1_lrs2_rtype; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_frs3_en = next_uop_1_frs3_en; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_fp_val = next_uop_1_fp_val; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_fp_single = next_uop_1_fp_single; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_xcpt_pf_if = next_uop_1_xcpt_pf_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_xcpt_ae_if = next_uop_1_xcpt_ae_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_xcpt_ma_if = next_uop_1_xcpt_ma_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_bp_debug_if = next_uop_1_bp_debug_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_1_bp_xcpt_if = next_uop_1_bp_xcpt_if; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_debug_fsrc = next_uop_1_debug_fsrc; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_1_debug_tsrc = next_uop_1_debug_tsrc; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] next_uop_1_prs1; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_1_prs2; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_1_prs3; // @[rename-stage.scala:123:24]
wire next_uop_1_prs1_busy; // @[rename-stage.scala:123:24]
wire next_uop_1_prs2_busy; // @[rename-stage.scala:123:24]
wire next_uop_1_prs3_busy; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_1_stale_pdst; // @[rename-stage.scala:123:24]
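// Slot-1 next state: r_valid_1 is held only while the uop has not been dispatched
// (io_dis_fire_1); next_uop_1 keeps the registered r_uop_1 while _GEN (the shared
// hold condition) is set, otherwise takes the incoming ren1 uop, with fields that
// have no ren1 counterpart reset to zero.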
wire _r_valid_T_2 = ~io_dis_fire_1_0; // @[rename-stage.scala:133:29, :160:7]
wire _r_valid_T_3 = r_valid_1 & _r_valid_T_2; // @[rename-stage.scala:121:27, :133:{26,29}]
assign next_uop_1_uopc = _GEN ? r_uop_1_uopc : ren1_uops_1_uopc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_inst = _GEN ? r_uop_1_inst : ren1_uops_1_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_debug_inst = _GEN ? r_uop_1_debug_inst : ren1_uops_1_debug_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_rvc = _GEN ? r_uop_1_is_rvc : ren1_uops_1_is_rvc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_debug_pc = _GEN ? r_uop_1_debug_pc : ren1_uops_1_debug_pc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_iq_type = _GEN ? r_uop_1_iq_type : ren1_uops_1_iq_type; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_fu_code = _GEN ? r_uop_1_fu_code : ren1_uops_1_fu_code; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_br_type = _GEN ? r_uop_1_ctrl_br_type : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_op1_sel = _GEN ? r_uop_1_ctrl_op1_sel : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_op2_sel = _GEN ? r_uop_1_ctrl_op2_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_imm_sel = _GEN ? r_uop_1_ctrl_imm_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_op_fcn = _GEN ? r_uop_1_ctrl_op_fcn : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_fcn_dw = _GEN & r_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_csr_cmd = _GEN ? r_uop_1_ctrl_csr_cmd : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_is_load = _GEN & r_uop_1_ctrl_is_load; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_is_sta = _GEN & r_uop_1_ctrl_is_sta; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ctrl_is_std = _GEN & r_uop_1_ctrl_is_std; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_iw_state = _GEN ? r_uop_1_iw_state : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_iw_p1_poisoned = _GEN & r_uop_1_iw_p1_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_iw_p2_poisoned = _GEN & r_uop_1_iw_p2_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_br = _GEN ? r_uop_1_is_br : ren1_uops_1_is_br; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_jalr = _GEN ? r_uop_1_is_jalr : ren1_uops_1_is_jalr; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_jal = _GEN ? r_uop_1_is_jal : ren1_uops_1_is_jal; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_sfb = _GEN ? r_uop_1_is_sfb : ren1_uops_1_is_sfb; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_br_mask = _GEN ? r_uop_1_br_mask : ren1_uops_1_br_mask; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_br_tag = _GEN ? r_uop_1_br_tag : ren1_uops_1_br_tag; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ftq_idx = _GEN ? r_uop_1_ftq_idx : ren1_uops_1_ftq_idx; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_edge_inst = _GEN ? r_uop_1_edge_inst : ren1_uops_1_edge_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_pc_lob = _GEN ? r_uop_1_pc_lob : ren1_uops_1_pc_lob; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_taken = _GEN ? r_uop_1_taken : ren1_uops_1_taken; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_imm_packed = _GEN ? r_uop_1_imm_packed : ren1_uops_1_imm_packed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_csr_addr = _GEN ? r_uop_1_csr_addr : 12'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_rob_idx = _GEN ? r_uop_1_rob_idx : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ldq_idx = _GEN ? r_uop_1_ldq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_stq_idx = _GEN ? r_uop_1_stq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_rxq_idx = _GEN ? r_uop_1_rxq_idx : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_pdst = _GEN ? r_uop_1_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_prs1 = _GEN ? r_uop_1_prs1 : ren1_uops_1_prs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_prs2 = _GEN ? r_uop_1_prs2 : ren1_uops_1_prs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_prs3 = _GEN ? r_uop_1_prs3 : ren1_uops_1_prs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ppred = _GEN ? r_uop_1_ppred : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_prs1_busy = _GEN & r_uop_1_prs1_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_prs2_busy = _GEN & r_uop_1_prs2_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_prs3_busy = _GEN & r_uop_1_prs3_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ppred_busy = _GEN & r_uop_1_ppred_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_stale_pdst = _GEN ? r_uop_1_stale_pdst : ren1_uops_1_stale_pdst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_exception = _GEN ? r_uop_1_exception : ren1_uops_1_exception; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_exc_cause = _GEN ? r_uop_1_exc_cause : ren1_uops_1_exc_cause; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_bypassable = _GEN ? r_uop_1_bypassable : ren1_uops_1_bypassable; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_mem_cmd = _GEN ? r_uop_1_mem_cmd : ren1_uops_1_mem_cmd; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_mem_size = _GEN ? r_uop_1_mem_size : ren1_uops_1_mem_size; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_mem_signed = _GEN ? r_uop_1_mem_signed : ren1_uops_1_mem_signed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_fence = _GEN ? r_uop_1_is_fence : ren1_uops_1_is_fence; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_fencei = _GEN ? r_uop_1_is_fencei : ren1_uops_1_is_fencei; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_amo = _GEN ? r_uop_1_is_amo : ren1_uops_1_is_amo; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_uses_ldq = _GEN ? r_uop_1_uses_ldq : ren1_uops_1_uses_ldq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_uses_stq = _GEN ? r_uop_1_uses_stq : ren1_uops_1_uses_stq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_sys_pc2epc = _GEN ? r_uop_1_is_sys_pc2epc : ren1_uops_1_is_sys_pc2epc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_is_unique = _GEN ? r_uop_1_is_unique : ren1_uops_1_is_unique; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_flush_on_commit = _GEN ? r_uop_1_flush_on_commit : ren1_uops_1_flush_on_commit; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ldst_is_rs1 = _GEN & r_uop_1_ldst_is_rs1; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ldst = _GEN ? r_uop_1_ldst : ren1_uops_1_ldst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_lrs1 = _GEN ? r_uop_1_lrs1 : ren1_uops_1_lrs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_lrs2 = _GEN ? r_uop_1_lrs2 : ren1_uops_1_lrs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_lrs3 = _GEN ? r_uop_1_lrs3 : ren1_uops_1_lrs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_ldst_val = _GEN ? r_uop_1_ldst_val : ren1_uops_1_ldst_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_dst_rtype = _GEN ? r_uop_1_dst_rtype : ren1_uops_1_dst_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_lrs1_rtype = _GEN ? r_uop_1_lrs1_rtype : ren1_uops_1_lrs1_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_lrs2_rtype = _GEN ? r_uop_1_lrs2_rtype : ren1_uops_1_lrs2_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_frs3_en = _GEN ? r_uop_1_frs3_en : ren1_uops_1_frs3_en; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_fp_val = _GEN ? r_uop_1_fp_val : ren1_uops_1_fp_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_fp_single = _GEN ? r_uop_1_fp_single : ren1_uops_1_fp_single; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_xcpt_pf_if = _GEN ? r_uop_1_xcpt_pf_if : ren1_uops_1_xcpt_pf_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_xcpt_ae_if = _GEN ? r_uop_1_xcpt_ae_if : ren1_uops_1_xcpt_ae_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_xcpt_ma_if = _GEN & r_uop_1_xcpt_ma_if; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_bp_debug_if = _GEN ? r_uop_1_bp_debug_if : ren1_uops_1_bp_debug_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_bp_xcpt_if = _GEN ? r_uop_1_bp_xcpt_if : ren1_uops_1_bp_xcpt_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_debug_fsrc = _GEN ? r_uop_1_debug_fsrc : ren1_uops_1_debug_fsrc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_1_debug_tsrc = _GEN ? r_uop_1_debug_tsrc : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
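// Pass the bypassed slot-1 uop through with an updated branch mask (same pattern as slot 0).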
wire [6:0] r_uop_newuop_1_uopc = r_uop_bypassed_uop_1_uopc; // @[util.scala:73:26]
wire [31:0] r_uop_newuop_1_inst = r_uop_bypassed_uop_1_inst; // @[util.scala:73:26]
wire [31:0] r_uop_newuop_1_debug_inst = r_uop_bypassed_uop_1_debug_inst; // @[util.scala:73:26]
wire r_uop_newuop_1_is_rvc = r_uop_bypassed_uop_1_is_rvc; // @[util.scala:73:26]
wire [39:0] r_uop_newuop_1_debug_pc = r_uop_bypassed_uop_1_debug_pc; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_1_iq_type = r_uop_bypassed_uop_1_iq_type; // @[util.scala:73:26]
wire [9:0] r_uop_newuop_1_fu_code = r_uop_bypassed_uop_1_fu_code; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_1_ctrl_br_type = r_uop_bypassed_uop_1_ctrl_br_type; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_ctrl_op1_sel = r_uop_bypassed_uop_1_ctrl_op1_sel; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_1_ctrl_op2_sel = r_uop_bypassed_uop_1_ctrl_op2_sel; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_1_ctrl_imm_sel = r_uop_bypassed_uop_1_ctrl_imm_sel; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_1_ctrl_op_fcn = r_uop_bypassed_uop_1_ctrl_op_fcn; // @[util.scala:73:26]
wire r_uop_newuop_1_ctrl_fcn_dw = r_uop_bypassed_uop_1_ctrl_fcn_dw; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_1_ctrl_csr_cmd = r_uop_bypassed_uop_1_ctrl_csr_cmd; // @[util.scala:73:26]
wire r_uop_newuop_1_ctrl_is_load = r_uop_bypassed_uop_1_ctrl_is_load; // @[util.scala:73:26]
wire r_uop_newuop_1_ctrl_is_sta = r_uop_bypassed_uop_1_ctrl_is_sta; // @[util.scala:73:26]
wire r_uop_newuop_1_ctrl_is_std = r_uop_bypassed_uop_1_ctrl_is_std; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_iw_state = r_uop_bypassed_uop_1_iw_state; // @[util.scala:73:26]
wire r_uop_newuop_1_iw_p1_poisoned = r_uop_bypassed_uop_1_iw_p1_poisoned; // @[util.scala:73:26]
wire r_uop_newuop_1_iw_p2_poisoned = r_uop_bypassed_uop_1_iw_p2_poisoned; // @[util.scala:73:26]
wire r_uop_newuop_1_is_br = r_uop_bypassed_uop_1_is_br; // @[util.scala:73:26]
wire r_uop_newuop_1_is_jalr = r_uop_bypassed_uop_1_is_jalr; // @[util.scala:73:26]
wire r_uop_newuop_1_is_jal = r_uop_bypassed_uop_1_is_jal; // @[util.scala:73:26]
wire r_uop_newuop_1_is_sfb = r_uop_bypassed_uop_1_is_sfb; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_1_br_tag = r_uop_bypassed_uop_1_br_tag; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_1_ftq_idx = r_uop_bypassed_uop_1_ftq_idx; // @[util.scala:73:26]
wire r_uop_newuop_1_edge_inst = r_uop_bypassed_uop_1_edge_inst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_1_pc_lob = r_uop_bypassed_uop_1_pc_lob; // @[util.scala:73:26]
wire r_uop_newuop_1_taken = r_uop_bypassed_uop_1_taken; // @[util.scala:73:26]
wire [19:0] r_uop_newuop_1_imm_packed = r_uop_bypassed_uop_1_imm_packed; // @[util.scala:73:26]
wire [11:0] r_uop_newuop_1_csr_addr = r_uop_bypassed_uop_1_csr_addr; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_1_rob_idx = r_uop_bypassed_uop_1_rob_idx; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_1_ldq_idx = r_uop_bypassed_uop_1_ldq_idx; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_1_stq_idx = r_uop_bypassed_uop_1_stq_idx; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_rxq_idx = r_uop_bypassed_uop_1_rxq_idx; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_1_pdst = r_uop_bypassed_uop_1_pdst; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_1_prs1 = r_uop_bypassed_uop_1_prs1; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_1_prs2 = r_uop_bypassed_uop_1_prs2; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_1_prs3 = r_uop_bypassed_uop_1_prs3; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs1_busy_T_1; // @[rename-stage.scala:199:45]
wire [4:0] r_uop_newuop_1_ppred = r_uop_bypassed_uop_1_ppred; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs2_busy_T_1; // @[rename-stage.scala:200:45]
wire r_uop_newuop_1_prs1_busy = r_uop_bypassed_uop_1_prs1_busy; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs3_busy_T_1; // @[rename-stage.scala:201:45]
wire r_uop_newuop_1_prs2_busy = r_uop_bypassed_uop_1_prs2_busy; // @[util.scala:73:26]
wire r_uop_newuop_1_prs3_busy = r_uop_bypassed_uop_1_prs3_busy; // @[util.scala:73:26]
wire r_uop_newuop_1_ppred_busy = r_uop_bypassed_uop_1_ppred_busy; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_1_stale_pdst = r_uop_bypassed_uop_1_stale_pdst; // @[util.scala:73:26]
wire r_uop_newuop_1_exception = r_uop_bypassed_uop_1_exception; // @[util.scala:73:26]
wire [63:0] r_uop_newuop_1_exc_cause = r_uop_bypassed_uop_1_exc_cause; // @[util.scala:73:26]
wire r_uop_newuop_1_bypassable = r_uop_bypassed_uop_1_bypassable; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_1_mem_cmd = r_uop_bypassed_uop_1_mem_cmd; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_mem_size = r_uop_bypassed_uop_1_mem_size; // @[util.scala:73:26]
wire r_uop_newuop_1_mem_signed = r_uop_bypassed_uop_1_mem_signed; // @[util.scala:73:26]
wire r_uop_newuop_1_is_fence = r_uop_bypassed_uop_1_is_fence; // @[util.scala:73:26]
wire r_uop_newuop_1_is_fencei = r_uop_bypassed_uop_1_is_fencei; // @[util.scala:73:26]
wire r_uop_newuop_1_is_amo = r_uop_bypassed_uop_1_is_amo; // @[util.scala:73:26]
wire r_uop_newuop_1_uses_ldq = r_uop_bypassed_uop_1_uses_ldq; // @[util.scala:73:26]
wire r_uop_newuop_1_uses_stq = r_uop_bypassed_uop_1_uses_stq; // @[util.scala:73:26]
wire r_uop_newuop_1_is_sys_pc2epc = r_uop_bypassed_uop_1_is_sys_pc2epc; // @[util.scala:73:26]
wire r_uop_newuop_1_is_unique = r_uop_bypassed_uop_1_is_unique; // @[util.scala:73:26]
wire r_uop_newuop_1_flush_on_commit = r_uop_bypassed_uop_1_flush_on_commit; // @[util.scala:73:26]
wire r_uop_newuop_1_ldst_is_rs1 = r_uop_bypassed_uop_1_ldst_is_rs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_1_ldst = r_uop_bypassed_uop_1_ldst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_1_lrs1 = r_uop_bypassed_uop_1_lrs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_1_lrs2 = r_uop_bypassed_uop_1_lrs2; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_1_lrs3 = r_uop_bypassed_uop_1_lrs3; // @[util.scala:73:26]
wire r_uop_newuop_1_ldst_val = r_uop_bypassed_uop_1_ldst_val; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_dst_rtype = r_uop_bypassed_uop_1_dst_rtype; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_lrs1_rtype = r_uop_bypassed_uop_1_lrs1_rtype; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_lrs2_rtype = r_uop_bypassed_uop_1_lrs2_rtype; // @[util.scala:73:26]
wire r_uop_newuop_1_frs3_en = r_uop_bypassed_uop_1_frs3_en; // @[util.scala:73:26]
wire r_uop_newuop_1_fp_val = r_uop_bypassed_uop_1_fp_val; // @[util.scala:73:26]
wire r_uop_newuop_1_fp_single = r_uop_bypassed_uop_1_fp_single; // @[util.scala:73:26]
wire r_uop_newuop_1_xcpt_pf_if = r_uop_bypassed_uop_1_xcpt_pf_if; // @[util.scala:73:26]
wire r_uop_newuop_1_xcpt_ae_if = r_uop_bypassed_uop_1_xcpt_ae_if; // @[util.scala:73:26]
wire r_uop_newuop_1_xcpt_ma_if = r_uop_bypassed_uop_1_xcpt_ma_if; // @[util.scala:73:26]
wire r_uop_newuop_1_bp_debug_if = r_uop_bypassed_uop_1_bp_debug_if; // @[util.scala:73:26]
wire r_uop_newuop_1_bp_xcpt_if = r_uop_bypassed_uop_1_bp_xcpt_if; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_debug_fsrc = r_uop_bypassed_uop_1_debug_fsrc; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_1_debug_tsrc = r_uop_bypassed_uop_1_debug_tsrc; // @[util.scala:73:26]
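// Bypass hit detection (uop slot 1): identical structure to slot 0, using
// next_uop_1's logical sources and destination.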
wire _r_uop_bypass_hits_rs1_T_3 = ren2_uops_0_ldst == next_uop_1_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_0_1 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs1_T_3; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs1_T_4 = ren2_uops_1_ldst == next_uop_1_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_1_1 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs1_T_4; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs1_T_5 = ren2_uops_2_ldst == next_uop_1_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_2_1 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs1_T_5; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs2_T_3 = ren2_uops_0_ldst == next_uop_1_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_0_1 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs2_T_3; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs2_T_4 = ren2_uops_1_ldst == next_uop_1_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_1_1 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs2_T_4; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs2_T_5 = ren2_uops_2_ldst == next_uop_1_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_2_1 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs2_T_5; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs3_T_3 = ren2_uops_0_ldst == next_uop_1_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_0_1 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs3_T_3; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_rs3_T_4 = ren2_uops_1_ldst == next_uop_1_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_1_1 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs3_T_4; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_rs3_T_5 = ren2_uops_2_ldst == next_uop_1_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_2_1 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs3_T_5; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_dst_T_3 = ren2_uops_0_ldst == next_uop_1_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_0_1 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_dst_T_3; // @[rename-stage.scala:109:29, :180:{77,87}]
wire _r_uop_bypass_hits_dst_T_4 = ren2_uops_1_ldst == next_uop_1_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_1_1 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_dst_T_4; // @[rename-stage.scala:109:29, :180:{77,87}]
wire _r_uop_bypass_hits_dst_T_5 = ren2_uops_2_ldst == next_uop_1_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_2_1 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_dst_T_5; // @[rename-stage.scala:109:29, :180:{77,87}]
wire [2:0] _r_uop_bypass_sel_rs1_enc_T_2 = {r_uop_bypass_hits_rs1_0_1, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs1_enc_T_3 = r_uop_bypass_hits_rs1_1_1 ? 3'h2 : _r_uop_bypass_sel_rs1_enc_T_2; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs1_enc_1 = r_uop_bypass_hits_rs1_2_1 ? 3'h1 : _r_uop_bypass_sel_rs1_enc_T_3; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs1_2_1 = r_uop_bypass_sel_rs1_enc_1[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs1_1_1 = r_uop_bypass_sel_rs1_enc_1[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs1_0_1 = r_uop_bypass_sel_rs1_enc_1[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_rs2_enc_T_2 = {r_uop_bypass_hits_rs2_0_1, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs2_enc_T_3 = r_uop_bypass_hits_rs2_1_1 ? 3'h2 : _r_uop_bypass_sel_rs2_enc_T_2; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs2_enc_1 = r_uop_bypass_hits_rs2_2_1 ? 3'h1 : _r_uop_bypass_sel_rs2_enc_T_3; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs2_2_1 = r_uop_bypass_sel_rs2_enc_1[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs2_1_1 = r_uop_bypass_sel_rs2_enc_1[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs2_0_1 = r_uop_bypass_sel_rs2_enc_1[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_rs3_enc_T_2 = {r_uop_bypass_hits_rs3_0_1, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs3_enc_T_3 = r_uop_bypass_hits_rs3_1_1 ? 3'h2 : _r_uop_bypass_sel_rs3_enc_T_2; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs3_enc_1 = r_uop_bypass_hits_rs3_2_1 ? 3'h1 : _r_uop_bypass_sel_rs3_enc_T_3; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs3_2_1 = r_uop_bypass_sel_rs3_enc_1[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs3_1_1 = r_uop_bypass_sel_rs3_enc_1[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs3_0_1 = r_uop_bypass_sel_rs3_enc_1[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_dst_enc_T_2 = {r_uop_bypass_hits_dst_0_1, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_dst_enc_T_3 = r_uop_bypass_hits_dst_1_1 ? 3'h2 : _r_uop_bypass_sel_dst_enc_T_2; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_dst_enc_1 = r_uop_bypass_hits_dst_2_1 ? 3'h1 : _r_uop_bypass_sel_dst_enc_T_3; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_dst_2_1 = r_uop_bypass_sel_dst_enc_1[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_dst_1_1 = r_uop_bypass_sel_dst_enc_1[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_dst_0_1 = r_uop_bypass_sel_dst_enc_1[2]; // @[OneHot.scala:83:30]
wire _r_uop_do_bypass_rs1_T_1 = r_uop_bypass_hits_rs1_0_1 | r_uop_bypass_hits_rs1_1_1; // @[rename-stage.scala:177:77, :187:49]
wire r_uop_do_bypass_rs1_1 = _r_uop_do_bypass_rs1_T_1 | r_uop_bypass_hits_rs1_2_1; // @[rename-stage.scala:177:77, :187:49]
wire _r_uop_do_bypass_rs2_T_1 = r_uop_bypass_hits_rs2_0_1 | r_uop_bypass_hits_rs2_1_1; // @[rename-stage.scala:178:77, :188:49]
wire r_uop_do_bypass_rs2_1 = _r_uop_do_bypass_rs2_T_1 | r_uop_bypass_hits_rs2_2_1; // @[rename-stage.scala:178:77, :188:49]
wire _r_uop_do_bypass_rs3_T_1 = r_uop_bypass_hits_rs3_0_1 | r_uop_bypass_hits_rs3_1_1; // @[rename-stage.scala:179:77, :189:49]
wire r_uop_do_bypass_rs3_1 = _r_uop_do_bypass_rs3_T_1 | r_uop_bypass_hits_rs3_2_1; // @[rename-stage.scala:179:77, :189:49]
wire _r_uop_do_bypass_dst_T_1 = r_uop_bypass_hits_dst_0_1 | r_uop_bypass_hits_dst_1_1; // @[rename-stage.scala:180:77, :190:49]
wire r_uop_do_bypass_dst_1 = _r_uop_do_bypass_dst_T_1 | r_uop_bypass_hits_dst_2_1; // @[rename-stage.scala:180:77, :190:49]
wire [6:0] _r_uop_bypassed_uop_prs1_T_5 = r_uop_bypass_sel_rs1_0_1 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_6 = r_uop_bypass_sel_rs1_1_1 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_7 = r_uop_bypass_sel_rs1_2_1 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_8 = _r_uop_bypassed_uop_prs1_T_5 | _r_uop_bypassed_uop_prs1_T_6; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs1_T_9 = _r_uop_bypassed_uop_prs1_T_8 | _r_uop_bypassed_uop_prs1_T_7; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs1_WIRE_1 = _r_uop_bypassed_uop_prs1_T_9; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_1_prs1 = r_uop_do_bypass_rs1_1 ? _r_uop_bypassed_uop_prs1_WIRE_1 : next_uop_1_prs1; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_T_5 = r_uop_bypass_sel_rs2_0_1 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_6 = r_uop_bypass_sel_rs2_1_1 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_7 = r_uop_bypass_sel_rs2_2_1 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_8 = _r_uop_bypassed_uop_prs2_T_5 | _r_uop_bypassed_uop_prs2_T_6; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_T_9 = _r_uop_bypassed_uop_prs2_T_8 | _r_uop_bypassed_uop_prs2_T_7; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_WIRE_1 = _r_uop_bypassed_uop_prs2_T_9; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_1_prs2 = r_uop_do_bypass_rs2_1 ? _r_uop_bypassed_uop_prs2_WIRE_1 : next_uop_1_prs2; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_T_5 = r_uop_bypass_sel_rs3_0_1 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_6 = r_uop_bypass_sel_rs3_1_1 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_7 = r_uop_bypass_sel_rs3_2_1 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_8 = _r_uop_bypassed_uop_prs3_T_5 | _r_uop_bypassed_uop_prs3_T_6; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_T_9 = _r_uop_bypassed_uop_prs3_T_8 | _r_uop_bypassed_uop_prs3_T_7; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_WIRE_1 = _r_uop_bypassed_uop_prs3_T_9; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_1_prs3 = r_uop_do_bypass_rs3_1 ? _r_uop_bypassed_uop_prs3_WIRE_1 : next_uop_1_prs3; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_5 = r_uop_bypass_sel_dst_0_1 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_6 = r_uop_bypass_sel_dst_1_1 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_7 = r_uop_bypass_sel_dst_2_1 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_8 = _r_uop_bypassed_uop_stale_pdst_T_5 | _r_uop_bypassed_uop_stale_pdst_T_6; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_9 = _r_uop_bypassed_uop_stale_pdst_T_8 | _r_uop_bypassed_uop_stale_pdst_T_7; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_WIRE_1 = _r_uop_bypassed_uop_stale_pdst_T_9; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_1_stale_pdst = r_uop_do_bypass_dst_1 ? _r_uop_bypassed_uop_stale_pdst_WIRE_1 : next_uop_1_stale_pdst; // @[Mux.scala:30:73]
assign _r_uop_bypassed_uop_prs1_busy_T_1 = next_uop_1_prs1_busy | r_uop_do_bypass_rs1_1; // @[rename-stage.scala:123:24, :187:49, :199:45]
assign r_uop_bypassed_uop_1_prs1_busy = _r_uop_bypassed_uop_prs1_busy_T_1; // @[rename-stage.scala:174:28, :199:45]
assign _r_uop_bypassed_uop_prs2_busy_T_1 = next_uop_1_prs2_busy | r_uop_do_bypass_rs2_1; // @[rename-stage.scala:123:24, :188:49, :200:45]
assign r_uop_bypassed_uop_1_prs2_busy = _r_uop_bypassed_uop_prs2_busy_T_1; // @[rename-stage.scala:174:28, :200:45]
assign _r_uop_bypassed_uop_prs3_busy_T_1 = next_uop_1_prs3_busy | r_uop_do_bypass_rs3_1; // @[rename-stage.scala:123:24, :189:49, :201:45]
assign r_uop_bypassed_uop_1_prs3_busy = _r_uop_bypassed_uop_prs3_busy_T_1; // @[rename-stage.scala:174:28, :201:45]
wire [15:0] _r_uop_newuop_br_mask_T_3; // @[util.scala:74:35]
wire [15:0] r_uop_newuop_1_br_mask; // @[util.scala:73:26]
wire [15:0] _r_uop_newuop_br_mask_T_2 = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37]
assign _r_uop_newuop_br_mask_T_3 = r_uop_bypassed_uop_1_br_mask & _r_uop_newuop_br_mask_T_2; // @[util.scala:74:{35,37}]
assign r_uop_newuop_1_br_mask = _r_uop_newuop_br_mask_T_3; // @[util.scala:73:26, :74:35]
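  // Pipeline registers for rename slot 2 (the third micro-op of the group);
  // most fields fan out unchanged to ren2_uops_2, while the physical-register
  // fields additionally feed the next_uop_2 / bypass logic further below.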
reg r_valid_2; // @[rename-stage.scala:121:27]
assign ren2_valids_2 = r_valid_2; // @[rename-stage.scala:107:29, :121:27]
reg [6:0] r_uop_2_uopc; // @[rename-stage.scala:122:23]
assign ren2_uops_2_uopc = r_uop_2_uopc; // @[rename-stage.scala:108:29, :122:23]
reg [31:0] r_uop_2_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_2_inst = r_uop_2_inst; // @[rename-stage.scala:108:29, :122:23]
reg [31:0] r_uop_2_debug_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_2_debug_inst = r_uop_2_debug_inst; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_rvc; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_rvc = r_uop_2_is_rvc; // @[rename-stage.scala:108:29, :122:23]
reg [39:0] r_uop_2_debug_pc; // @[rename-stage.scala:122:23]
assign ren2_uops_2_debug_pc = r_uop_2_debug_pc; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_2_iq_type; // @[rename-stage.scala:122:23]
assign ren2_uops_2_iq_type = r_uop_2_iq_type; // @[rename-stage.scala:108:29, :122:23]
reg [9:0] r_uop_2_fu_code; // @[rename-stage.scala:122:23]
assign ren2_uops_2_fu_code = r_uop_2_fu_code; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_2_ctrl_br_type; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_br_type = r_uop_2_ctrl_br_type; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_ctrl_op1_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_op1_sel = r_uop_2_ctrl_op1_sel; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_2_ctrl_op2_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_op2_sel = r_uop_2_ctrl_op2_sel; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_2_ctrl_imm_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_imm_sel = r_uop_2_ctrl_imm_sel; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_2_ctrl_op_fcn; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_op_fcn = r_uop_2_ctrl_op_fcn; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_fcn_dw = r_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_2_ctrl_csr_cmd; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_csr_cmd = r_uop_2_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_ctrl_is_load; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_is_load = r_uop_2_ctrl_is_load; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_ctrl_is_sta; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_is_sta = r_uop_2_ctrl_is_sta; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_ctrl_is_std; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ctrl_is_std = r_uop_2_ctrl_is_std; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_iw_state; // @[rename-stage.scala:122:23]
assign ren2_uops_2_iw_state = r_uop_2_iw_state; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_iw_p1_poisoned; // @[rename-stage.scala:122:23]
assign ren2_uops_2_iw_p1_poisoned = r_uop_2_iw_p1_poisoned; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_iw_p2_poisoned; // @[rename-stage.scala:122:23]
assign ren2_uops_2_iw_p2_poisoned = r_uop_2_iw_p2_poisoned; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_br; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_br = r_uop_2_is_br; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_jalr; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_jalr = r_uop_2_is_jalr; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_jal; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_jal = r_uop_2_is_jal; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_sfb; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_sfb = r_uop_2_is_sfb; // @[rename-stage.scala:108:29, :122:23]
reg [15:0] r_uop_2_br_mask; // @[rename-stage.scala:122:23]
assign ren2_uops_2_br_mask = r_uop_2_br_mask; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_2_br_tag; // @[rename-stage.scala:122:23]
assign ren2_uops_2_br_tag = r_uop_2_br_tag; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_2_ftq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ftq_idx = r_uop_2_ftq_idx; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_edge_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_2_edge_inst = r_uop_2_edge_inst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_2_pc_lob; // @[rename-stage.scala:122:23]
assign ren2_uops_2_pc_lob = r_uop_2_pc_lob; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_taken; // @[rename-stage.scala:122:23]
assign ren2_uops_2_taken = r_uop_2_taken; // @[rename-stage.scala:108:29, :122:23]
reg [19:0] r_uop_2_imm_packed; // @[rename-stage.scala:122:23]
assign ren2_uops_2_imm_packed = r_uop_2_imm_packed; // @[rename-stage.scala:108:29, :122:23]
reg [11:0] r_uop_2_csr_addr; // @[rename-stage.scala:122:23]
assign ren2_uops_2_csr_addr = r_uop_2_csr_addr; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_2_rob_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_2_rob_idx = r_uop_2_rob_idx; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_2_ldq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ldq_idx = r_uop_2_ldq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_2_stq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_2_stq_idx = r_uop_2_stq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_rxq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_2_rxq_idx = r_uop_2_rxq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_2_pdst; // @[rename-stage.scala:122:23]
reg [6:0] r_uop_2_prs1; // @[rename-stage.scala:122:23]
assign ren2_uops_2_prs1 = r_uop_2_prs1; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_2_prs2; // @[rename-stage.scala:122:23]
assign ren2_uops_2_prs2 = r_uop_2_prs2; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_2_prs3; // @[rename-stage.scala:122:23]
assign ren2_uops_2_prs3 = r_uop_2_prs3; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_2_ppred; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ppred = r_uop_2_ppred; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_prs1_busy; // @[rename-stage.scala:122:23]
reg r_uop_2_prs2_busy; // @[rename-stage.scala:122:23]
reg r_uop_2_prs3_busy; // @[rename-stage.scala:122:23]
reg r_uop_2_ppred_busy; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ppred_busy = r_uop_2_ppred_busy; // @[rename-stage.scala:108:29, :122:23]
reg [6:0] r_uop_2_stale_pdst; // @[rename-stage.scala:122:23]
assign ren2_uops_2_stale_pdst = r_uop_2_stale_pdst; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_exception; // @[rename-stage.scala:122:23]
assign ren2_uops_2_exception = r_uop_2_exception; // @[rename-stage.scala:108:29, :122:23]
reg [63:0] r_uop_2_exc_cause; // @[rename-stage.scala:122:23]
assign ren2_uops_2_exc_cause = r_uop_2_exc_cause; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_bypassable; // @[rename-stage.scala:122:23]
assign ren2_uops_2_bypassable = r_uop_2_bypassable; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_2_mem_cmd; // @[rename-stage.scala:122:23]
assign ren2_uops_2_mem_cmd = r_uop_2_mem_cmd; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_mem_size; // @[rename-stage.scala:122:23]
assign ren2_uops_2_mem_size = r_uop_2_mem_size; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_mem_signed; // @[rename-stage.scala:122:23]
assign ren2_uops_2_mem_signed = r_uop_2_mem_signed; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_fence; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_fence = r_uop_2_is_fence; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_fencei; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_fencei = r_uop_2_is_fencei; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_amo; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_amo = r_uop_2_is_amo; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_uses_ldq; // @[rename-stage.scala:122:23]
assign ren2_uops_2_uses_ldq = r_uop_2_uses_ldq; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_uses_stq; // @[rename-stage.scala:122:23]
assign ren2_uops_2_uses_stq = r_uop_2_uses_stq; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_sys_pc2epc; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_sys_pc2epc = r_uop_2_is_sys_pc2epc; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_is_unique; // @[rename-stage.scala:122:23]
assign ren2_uops_2_is_unique = r_uop_2_is_unique; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_flush_on_commit; // @[rename-stage.scala:122:23]
assign ren2_uops_2_flush_on_commit = r_uop_2_flush_on_commit; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_ldst_is_rs1; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ldst_is_rs1 = r_uop_2_ldst_is_rs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_2_ldst; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ldst = r_uop_2_ldst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_2_lrs1; // @[rename-stage.scala:122:23]
assign ren2_uops_2_lrs1 = r_uop_2_lrs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_2_lrs2; // @[rename-stage.scala:122:23]
assign ren2_uops_2_lrs2 = r_uop_2_lrs2; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_2_lrs3; // @[rename-stage.scala:122:23]
assign ren2_uops_2_lrs3 = r_uop_2_lrs3; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_ldst_val; // @[rename-stage.scala:122:23]
assign ren2_uops_2_ldst_val = r_uop_2_ldst_val; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_dst_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_2_dst_rtype = r_uop_2_dst_rtype; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_lrs1_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_2_lrs1_rtype = r_uop_2_lrs1_rtype; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_lrs2_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_2_lrs2_rtype = r_uop_2_lrs2_rtype; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_frs3_en; // @[rename-stage.scala:122:23]
assign ren2_uops_2_frs3_en = r_uop_2_frs3_en; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_fp_val; // @[rename-stage.scala:122:23]
assign ren2_uops_2_fp_val = r_uop_2_fp_val; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_fp_single; // @[rename-stage.scala:122:23]
assign ren2_uops_2_fp_single = r_uop_2_fp_single; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_xcpt_pf_if; // @[rename-stage.scala:122:23]
assign ren2_uops_2_xcpt_pf_if = r_uop_2_xcpt_pf_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_xcpt_ae_if; // @[rename-stage.scala:122:23]
assign ren2_uops_2_xcpt_ae_if = r_uop_2_xcpt_ae_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_xcpt_ma_if; // @[rename-stage.scala:122:23]
assign ren2_uops_2_xcpt_ma_if = r_uop_2_xcpt_ma_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_bp_debug_if; // @[rename-stage.scala:122:23]
assign ren2_uops_2_bp_debug_if = r_uop_2_bp_debug_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_2_bp_xcpt_if; // @[rename-stage.scala:122:23]
assign ren2_uops_2_bp_xcpt_if = r_uop_2_bp_xcpt_if; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_debug_fsrc; // @[rename-stage.scala:122:23]
assign ren2_uops_2_debug_fsrc = r_uop_2_debug_fsrc; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_2_debug_tsrc; // @[rename-stage.scala:122:23]
assign ren2_uops_2_debug_tsrc = r_uop_2_debug_tsrc; // @[rename-stage.scala:108:29, :122:23]
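  // Bypassed copy of the incoming slot-2 uop: fields that are never renamed by
  // an older uop in the group pass straight through from next_uop_2; prs1-3,
  // stale_pdst and the operand busy bits are overridden by the bypass muxes below.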
wire [6:0] r_uop_bypassed_uop_2_uopc = next_uop_2_uopc; // @[rename-stage.scala:123:24, :174:28]
wire [31:0] r_uop_bypassed_uop_2_inst = next_uop_2_inst; // @[rename-stage.scala:123:24, :174:28]
wire [31:0] r_uop_bypassed_uop_2_debug_inst = next_uop_2_debug_inst; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_rvc = next_uop_2_is_rvc; // @[rename-stage.scala:123:24, :174:28]
wire [39:0] r_uop_bypassed_uop_2_debug_pc = next_uop_2_debug_pc; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_2_iq_type = next_uop_2_iq_type; // @[rename-stage.scala:123:24, :174:28]
wire [9:0] r_uop_bypassed_uop_2_fu_code = next_uop_2_fu_code; // @[rename-stage.scala:123:24, :174:28]
wire [3:0] r_uop_bypassed_uop_2_ctrl_br_type = next_uop_2_ctrl_br_type; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_ctrl_op1_sel = next_uop_2_ctrl_op1_sel; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_2_ctrl_op2_sel = next_uop_2_ctrl_op2_sel; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_2_ctrl_imm_sel = next_uop_2_ctrl_imm_sel; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_2_ctrl_op_fcn = next_uop_2_ctrl_op_fcn; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_ctrl_fcn_dw = next_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:123:24, :174:28]
wire [2:0] r_uop_bypassed_uop_2_ctrl_csr_cmd = next_uop_2_ctrl_csr_cmd; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_ctrl_is_load = next_uop_2_ctrl_is_load; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_ctrl_is_sta = next_uop_2_ctrl_is_sta; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_ctrl_is_std = next_uop_2_ctrl_is_std; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_iw_state = next_uop_2_iw_state; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_iw_p1_poisoned = next_uop_2_iw_p1_poisoned; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_iw_p2_poisoned = next_uop_2_iw_p2_poisoned; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_br = next_uop_2_is_br; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_jalr = next_uop_2_is_jalr; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_jal = next_uop_2_is_jal; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_sfb = next_uop_2_is_sfb; // @[rename-stage.scala:123:24, :174:28]
wire [15:0] r_uop_bypassed_uop_2_br_mask = next_uop_2_br_mask; // @[rename-stage.scala:123:24, :174:28]
wire [3:0] r_uop_bypassed_uop_2_br_tag = next_uop_2_br_tag; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_2_ftq_idx = next_uop_2_ftq_idx; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_edge_inst = next_uop_2_edge_inst; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_2_pc_lob = next_uop_2_pc_lob; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_taken = next_uop_2_taken; // @[rename-stage.scala:123:24, :174:28]
wire [19:0] r_uop_bypassed_uop_2_imm_packed = next_uop_2_imm_packed; // @[rename-stage.scala:123:24, :174:28]
wire [11:0] r_uop_bypassed_uop_2_csr_addr = next_uop_2_csr_addr; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] r_uop_bypassed_uop_2_rob_idx = next_uop_2_rob_idx; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_2_ldq_idx = next_uop_2_ldq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_2_stq_idx = next_uop_2_stq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_rxq_idx = next_uop_2_rxq_idx; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] r_uop_bypassed_uop_2_pdst = next_uop_2_pdst; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_2_ppred = next_uop_2_ppred; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_ppred_busy = next_uop_2_ppred_busy; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_exception = next_uop_2_exception; // @[rename-stage.scala:123:24, :174:28]
wire [63:0] r_uop_bypassed_uop_2_exc_cause = next_uop_2_exc_cause; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_bypassable = next_uop_2_bypassable; // @[rename-stage.scala:123:24, :174:28]
wire [4:0] r_uop_bypassed_uop_2_mem_cmd = next_uop_2_mem_cmd; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_mem_size = next_uop_2_mem_size; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_mem_signed = next_uop_2_mem_signed; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_fence = next_uop_2_is_fence; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_fencei = next_uop_2_is_fencei; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_amo = next_uop_2_is_amo; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_uses_ldq = next_uop_2_uses_ldq; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_uses_stq = next_uop_2_uses_stq; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_sys_pc2epc = next_uop_2_is_sys_pc2epc; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_is_unique = next_uop_2_is_unique; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_flush_on_commit = next_uop_2_flush_on_commit; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_ldst_is_rs1 = next_uop_2_ldst_is_rs1; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_2_ldst = next_uop_2_ldst; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_2_lrs1 = next_uop_2_lrs1; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_2_lrs2 = next_uop_2_lrs2; // @[rename-stage.scala:123:24, :174:28]
wire [5:0] r_uop_bypassed_uop_2_lrs3 = next_uop_2_lrs3; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_ldst_val = next_uop_2_ldst_val; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_dst_rtype = next_uop_2_dst_rtype; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_lrs1_rtype = next_uop_2_lrs1_rtype; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_lrs2_rtype = next_uop_2_lrs2_rtype; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_frs3_en = next_uop_2_frs3_en; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_fp_val = next_uop_2_fp_val; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_fp_single = next_uop_2_fp_single; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_xcpt_pf_if = next_uop_2_xcpt_pf_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_xcpt_ae_if = next_uop_2_xcpt_ae_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_xcpt_ma_if = next_uop_2_xcpt_ma_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_bp_debug_if = next_uop_2_bp_debug_if; // @[rename-stage.scala:123:24, :174:28]
wire r_uop_bypassed_uop_2_bp_xcpt_if = next_uop_2_bp_xcpt_if; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_debug_fsrc = next_uop_2_debug_fsrc; // @[rename-stage.scala:123:24, :174:28]
wire [1:0] r_uop_bypassed_uop_2_debug_tsrc = next_uop_2_debug_tsrc; // @[rename-stage.scala:123:24, :174:28]
wire [6:0] next_uop_2_prs1; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_2_prs2; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_2_prs3; // @[rename-stage.scala:123:24]
wire next_uop_2_prs1_busy; // @[rename-stage.scala:123:24]
wire next_uop_2_prs2_busy; // @[rename-stage.scala:123:24]
wire next_uop_2_prs3_busy; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_2_stale_pdst; // @[rename-stage.scala:123:24]
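  // next_uop_2 either holds the registered uop (shared select _GEN, defined
  // earlier in the module) or takes the freshly renamed uop arriving from
  // stage 1 as ren1_uops_2.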
wire _r_valid_T_4 = ~io_dis_fire_2_0; // @[rename-stage.scala:133:29, :160:7]
wire _r_valid_T_5 = r_valid_2 & _r_valid_T_4; // @[rename-stage.scala:121:27, :133:{26,29}]
assign next_uop_2_uopc = _GEN ? r_uop_2_uopc : ren1_uops_2_uopc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_inst = _GEN ? r_uop_2_inst : ren1_uops_2_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_debug_inst = _GEN ? r_uop_2_debug_inst : ren1_uops_2_debug_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_rvc = _GEN ? r_uop_2_is_rvc : ren1_uops_2_is_rvc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_debug_pc = _GEN ? r_uop_2_debug_pc : ren1_uops_2_debug_pc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_iq_type = _GEN ? r_uop_2_iq_type : ren1_uops_2_iq_type; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_fu_code = _GEN ? r_uop_2_fu_code : ren1_uops_2_fu_code; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_br_type = _GEN ? r_uop_2_ctrl_br_type : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_op1_sel = _GEN ? r_uop_2_ctrl_op1_sel : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_op2_sel = _GEN ? r_uop_2_ctrl_op2_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_imm_sel = _GEN ? r_uop_2_ctrl_imm_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_op_fcn = _GEN ? r_uop_2_ctrl_op_fcn : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_fcn_dw = _GEN & r_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_csr_cmd = _GEN ? r_uop_2_ctrl_csr_cmd : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_is_load = _GEN & r_uop_2_ctrl_is_load; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_is_sta = _GEN & r_uop_2_ctrl_is_sta; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ctrl_is_std = _GEN & r_uop_2_ctrl_is_std; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_iw_state = _GEN ? r_uop_2_iw_state : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_iw_p1_poisoned = _GEN & r_uop_2_iw_p1_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_iw_p2_poisoned = _GEN & r_uop_2_iw_p2_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_br = _GEN ? r_uop_2_is_br : ren1_uops_2_is_br; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_jalr = _GEN ? r_uop_2_is_jalr : ren1_uops_2_is_jalr; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_jal = _GEN ? r_uop_2_is_jal : ren1_uops_2_is_jal; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_sfb = _GEN ? r_uop_2_is_sfb : ren1_uops_2_is_sfb; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_br_mask = _GEN ? r_uop_2_br_mask : ren1_uops_2_br_mask; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_br_tag = _GEN ? r_uop_2_br_tag : ren1_uops_2_br_tag; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ftq_idx = _GEN ? r_uop_2_ftq_idx : ren1_uops_2_ftq_idx; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_edge_inst = _GEN ? r_uop_2_edge_inst : ren1_uops_2_edge_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_pc_lob = _GEN ? r_uop_2_pc_lob : ren1_uops_2_pc_lob; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_taken = _GEN ? r_uop_2_taken : ren1_uops_2_taken; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_imm_packed = _GEN ? r_uop_2_imm_packed : ren1_uops_2_imm_packed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_csr_addr = _GEN ? r_uop_2_csr_addr : 12'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_rob_idx = _GEN ? r_uop_2_rob_idx : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ldq_idx = _GEN ? r_uop_2_ldq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_stq_idx = _GEN ? r_uop_2_stq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_rxq_idx = _GEN ? r_uop_2_rxq_idx : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_pdst = _GEN ? r_uop_2_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_prs1 = _GEN ? r_uop_2_prs1 : ren1_uops_2_prs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_prs2 = _GEN ? r_uop_2_prs2 : ren1_uops_2_prs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_prs3 = _GEN ? r_uop_2_prs3 : ren1_uops_2_prs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ppred = _GEN ? r_uop_2_ppred : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_prs1_busy = _GEN & r_uop_2_prs1_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_prs2_busy = _GEN & r_uop_2_prs2_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_prs3_busy = _GEN & r_uop_2_prs3_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ppred_busy = _GEN & r_uop_2_ppred_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_stale_pdst = _GEN ? r_uop_2_stale_pdst : ren1_uops_2_stale_pdst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_exception = _GEN ? r_uop_2_exception : ren1_uops_2_exception; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_exc_cause = _GEN ? r_uop_2_exc_cause : ren1_uops_2_exc_cause; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_bypassable = _GEN ? r_uop_2_bypassable : ren1_uops_2_bypassable; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_mem_cmd = _GEN ? r_uop_2_mem_cmd : ren1_uops_2_mem_cmd; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_mem_size = _GEN ? r_uop_2_mem_size : ren1_uops_2_mem_size; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_mem_signed = _GEN ? r_uop_2_mem_signed : ren1_uops_2_mem_signed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_fence = _GEN ? r_uop_2_is_fence : ren1_uops_2_is_fence; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_fencei = _GEN ? r_uop_2_is_fencei : ren1_uops_2_is_fencei; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_amo = _GEN ? r_uop_2_is_amo : ren1_uops_2_is_amo; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_uses_ldq = _GEN ? r_uop_2_uses_ldq : ren1_uops_2_uses_ldq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_uses_stq = _GEN ? r_uop_2_uses_stq : ren1_uops_2_uses_stq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_sys_pc2epc = _GEN ? r_uop_2_is_sys_pc2epc : ren1_uops_2_is_sys_pc2epc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_is_unique = _GEN ? r_uop_2_is_unique : ren1_uops_2_is_unique; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_flush_on_commit = _GEN ? r_uop_2_flush_on_commit : ren1_uops_2_flush_on_commit; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ldst_is_rs1 = _GEN & r_uop_2_ldst_is_rs1; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ldst = _GEN ? r_uop_2_ldst : ren1_uops_2_ldst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_lrs1 = _GEN ? r_uop_2_lrs1 : ren1_uops_2_lrs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_lrs2 = _GEN ? r_uop_2_lrs2 : ren1_uops_2_lrs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_lrs3 = _GEN ? r_uop_2_lrs3 : ren1_uops_2_lrs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_ldst_val = _GEN ? r_uop_2_ldst_val : ren1_uops_2_ldst_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_dst_rtype = _GEN ? r_uop_2_dst_rtype : ren1_uops_2_dst_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_lrs1_rtype = _GEN ? r_uop_2_lrs1_rtype : ren1_uops_2_lrs1_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_lrs2_rtype = _GEN ? r_uop_2_lrs2_rtype : ren1_uops_2_lrs2_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_frs3_en = _GEN ? r_uop_2_frs3_en : ren1_uops_2_frs3_en; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_fp_val = _GEN ? r_uop_2_fp_val : ren1_uops_2_fp_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_fp_single = _GEN ? r_uop_2_fp_single : ren1_uops_2_fp_single; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_xcpt_pf_if = _GEN ? r_uop_2_xcpt_pf_if : ren1_uops_2_xcpt_pf_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_xcpt_ae_if = _GEN ? r_uop_2_xcpt_ae_if : ren1_uops_2_xcpt_ae_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_xcpt_ma_if = _GEN & r_uop_2_xcpt_ma_if; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_bp_debug_if = _GEN ? r_uop_2_bp_debug_if : ren1_uops_2_bp_debug_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_bp_xcpt_if = _GEN ? r_uop_2_bp_xcpt_if : ren1_uops_2_bp_xcpt_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_debug_fsrc = _GEN ? r_uop_2_debug_fsrc : ren1_uops_2_debug_fsrc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_2_debug_tsrc = _GEN ? r_uop_2_debug_tsrc : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
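  // r_uop_newuop_2 copies the bypassed uop field-for-field; only br_mask is
  // recomputed below by clearing the branches resolved this cycle.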
wire [6:0] r_uop_newuop_2_uopc = r_uop_bypassed_uop_2_uopc; // @[util.scala:73:26]
wire [31:0] r_uop_newuop_2_inst = r_uop_bypassed_uop_2_inst; // @[util.scala:73:26]
wire [31:0] r_uop_newuop_2_debug_inst = r_uop_bypassed_uop_2_debug_inst; // @[util.scala:73:26]
wire r_uop_newuop_2_is_rvc = r_uop_bypassed_uop_2_is_rvc; // @[util.scala:73:26]
wire [39:0] r_uop_newuop_2_debug_pc = r_uop_bypassed_uop_2_debug_pc; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_2_iq_type = r_uop_bypassed_uop_2_iq_type; // @[util.scala:73:26]
wire [9:0] r_uop_newuop_2_fu_code = r_uop_bypassed_uop_2_fu_code; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_2_ctrl_br_type = r_uop_bypassed_uop_2_ctrl_br_type; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_ctrl_op1_sel = r_uop_bypassed_uop_2_ctrl_op1_sel; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_2_ctrl_op2_sel = r_uop_bypassed_uop_2_ctrl_op2_sel; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_2_ctrl_imm_sel = r_uop_bypassed_uop_2_ctrl_imm_sel; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_2_ctrl_op_fcn = r_uop_bypassed_uop_2_ctrl_op_fcn; // @[util.scala:73:26]
wire r_uop_newuop_2_ctrl_fcn_dw = r_uop_bypassed_uop_2_ctrl_fcn_dw; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_2_ctrl_csr_cmd = r_uop_bypassed_uop_2_ctrl_csr_cmd; // @[util.scala:73:26]
wire r_uop_newuop_2_ctrl_is_load = r_uop_bypassed_uop_2_ctrl_is_load; // @[util.scala:73:26]
wire r_uop_newuop_2_ctrl_is_sta = r_uop_bypassed_uop_2_ctrl_is_sta; // @[util.scala:73:26]
wire r_uop_newuop_2_ctrl_is_std = r_uop_bypassed_uop_2_ctrl_is_std; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_iw_state = r_uop_bypassed_uop_2_iw_state; // @[util.scala:73:26]
wire r_uop_newuop_2_iw_p1_poisoned = r_uop_bypassed_uop_2_iw_p1_poisoned; // @[util.scala:73:26]
wire r_uop_newuop_2_iw_p2_poisoned = r_uop_bypassed_uop_2_iw_p2_poisoned; // @[util.scala:73:26]
wire r_uop_newuop_2_is_br = r_uop_bypassed_uop_2_is_br; // @[util.scala:73:26]
wire r_uop_newuop_2_is_jalr = r_uop_bypassed_uop_2_is_jalr; // @[util.scala:73:26]
wire r_uop_newuop_2_is_jal = r_uop_bypassed_uop_2_is_jal; // @[util.scala:73:26]
wire r_uop_newuop_2_is_sfb = r_uop_bypassed_uop_2_is_sfb; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_2_br_tag = r_uop_bypassed_uop_2_br_tag; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_2_ftq_idx = r_uop_bypassed_uop_2_ftq_idx; // @[util.scala:73:26]
wire r_uop_newuop_2_edge_inst = r_uop_bypassed_uop_2_edge_inst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_2_pc_lob = r_uop_bypassed_uop_2_pc_lob; // @[util.scala:73:26]
wire r_uop_newuop_2_taken = r_uop_bypassed_uop_2_taken; // @[util.scala:73:26]
wire [19:0] r_uop_newuop_2_imm_packed = r_uop_bypassed_uop_2_imm_packed; // @[util.scala:73:26]
wire [11:0] r_uop_newuop_2_csr_addr = r_uop_bypassed_uop_2_csr_addr; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_2_rob_idx = r_uop_bypassed_uop_2_rob_idx; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_2_ldq_idx = r_uop_bypassed_uop_2_ldq_idx; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_2_stq_idx = r_uop_bypassed_uop_2_stq_idx; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_rxq_idx = r_uop_bypassed_uop_2_rxq_idx; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_2_pdst = r_uop_bypassed_uop_2_pdst; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_2_prs1 = r_uop_bypassed_uop_2_prs1; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_2_prs2 = r_uop_bypassed_uop_2_prs2; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_2_prs3 = r_uop_bypassed_uop_2_prs3; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs1_busy_T_2; // @[rename-stage.scala:199:45]
wire [4:0] r_uop_newuop_2_ppred = r_uop_bypassed_uop_2_ppred; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs2_busy_T_2; // @[rename-stage.scala:200:45]
wire r_uop_newuop_2_prs1_busy = r_uop_bypassed_uop_2_prs1_busy; // @[util.scala:73:26]
wire _r_uop_bypassed_uop_prs3_busy_T_2; // @[rename-stage.scala:201:45]
wire r_uop_newuop_2_prs2_busy = r_uop_bypassed_uop_2_prs2_busy; // @[util.scala:73:26]
wire r_uop_newuop_2_prs3_busy = r_uop_bypassed_uop_2_prs3_busy; // @[util.scala:73:26]
wire r_uop_newuop_2_ppred_busy = r_uop_bypassed_uop_2_ppred_busy; // @[util.scala:73:26]
wire [6:0] r_uop_newuop_2_stale_pdst = r_uop_bypassed_uop_2_stale_pdst; // @[util.scala:73:26]
wire r_uop_newuop_2_exception = r_uop_bypassed_uop_2_exception; // @[util.scala:73:26]
wire [63:0] r_uop_newuop_2_exc_cause = r_uop_bypassed_uop_2_exc_cause; // @[util.scala:73:26]
wire r_uop_newuop_2_bypassable = r_uop_bypassed_uop_2_bypassable; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_2_mem_cmd = r_uop_bypassed_uop_2_mem_cmd; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_mem_size = r_uop_bypassed_uop_2_mem_size; // @[util.scala:73:26]
wire r_uop_newuop_2_mem_signed = r_uop_bypassed_uop_2_mem_signed; // @[util.scala:73:26]
wire r_uop_newuop_2_is_fence = r_uop_bypassed_uop_2_is_fence; // @[util.scala:73:26]
wire r_uop_newuop_2_is_fencei = r_uop_bypassed_uop_2_is_fencei; // @[util.scala:73:26]
wire r_uop_newuop_2_is_amo = r_uop_bypassed_uop_2_is_amo; // @[util.scala:73:26]
wire r_uop_newuop_2_uses_ldq = r_uop_bypassed_uop_2_uses_ldq; // @[util.scala:73:26]
wire r_uop_newuop_2_uses_stq = r_uop_bypassed_uop_2_uses_stq; // @[util.scala:73:26]
wire r_uop_newuop_2_is_sys_pc2epc = r_uop_bypassed_uop_2_is_sys_pc2epc; // @[util.scala:73:26]
wire r_uop_newuop_2_is_unique = r_uop_bypassed_uop_2_is_unique; // @[util.scala:73:26]
wire r_uop_newuop_2_flush_on_commit = r_uop_bypassed_uop_2_flush_on_commit; // @[util.scala:73:26]
wire r_uop_newuop_2_ldst_is_rs1 = r_uop_bypassed_uop_2_ldst_is_rs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_2_ldst = r_uop_bypassed_uop_2_ldst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_2_lrs1 = r_uop_bypassed_uop_2_lrs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_2_lrs2 = r_uop_bypassed_uop_2_lrs2; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_2_lrs3 = r_uop_bypassed_uop_2_lrs3; // @[util.scala:73:26]
wire r_uop_newuop_2_ldst_val = r_uop_bypassed_uop_2_ldst_val; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_dst_rtype = r_uop_bypassed_uop_2_dst_rtype; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_lrs1_rtype = r_uop_bypassed_uop_2_lrs1_rtype; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_lrs2_rtype = r_uop_bypassed_uop_2_lrs2_rtype; // @[util.scala:73:26]
wire r_uop_newuop_2_frs3_en = r_uop_bypassed_uop_2_frs3_en; // @[util.scala:73:26]
wire r_uop_newuop_2_fp_val = r_uop_bypassed_uop_2_fp_val; // @[util.scala:73:26]
wire r_uop_newuop_2_fp_single = r_uop_bypassed_uop_2_fp_single; // @[util.scala:73:26]
wire r_uop_newuop_2_xcpt_pf_if = r_uop_bypassed_uop_2_xcpt_pf_if; // @[util.scala:73:26]
wire r_uop_newuop_2_xcpt_ae_if = r_uop_bypassed_uop_2_xcpt_ae_if; // @[util.scala:73:26]
wire r_uop_newuop_2_xcpt_ma_if = r_uop_bypassed_uop_2_xcpt_ma_if; // @[util.scala:73:26]
wire r_uop_newuop_2_bp_debug_if = r_uop_bypassed_uop_2_bp_debug_if; // @[util.scala:73:26]
wire r_uop_newuop_2_bp_xcpt_if = r_uop_bypassed_uop_2_bp_xcpt_if; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_debug_fsrc = r_uop_bypassed_uop_2_debug_fsrc; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_2_debug_tsrc = r_uop_bypassed_uop_2_debug_tsrc; // @[util.scala:73:26]
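  // Bypass hit detection for slot 2: each ren2 uop that allocates a new pdst
  // this cycle is compared, by logical register, against the incoming uop's
  // sources (lrs1/lrs2/lrs3) and destination (ldst).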
wire _r_uop_bypass_hits_rs1_T_6 = ren2_uops_0_ldst == next_uop_2_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_0_2 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs1_T_6; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs1_T_7 = ren2_uops_1_ldst == next_uop_2_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_1_2 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs1_T_7; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs1_T_8 = ren2_uops_2_ldst == next_uop_2_lrs1; // @[rename-stage.scala:108:29, :123:24, :177:87]
wire r_uop_bypass_hits_rs1_2_2 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs1_T_8; // @[rename-stage.scala:109:29, :177:{77,87}]
wire _r_uop_bypass_hits_rs2_T_6 = ren2_uops_0_ldst == next_uop_2_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_0_2 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs2_T_6; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs2_T_7 = ren2_uops_1_ldst == next_uop_2_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_1_2 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs2_T_7; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs2_T_8 = ren2_uops_2_ldst == next_uop_2_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87]
wire r_uop_bypass_hits_rs2_2_2 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs2_T_8; // @[rename-stage.scala:109:29, :178:{77,87}]
wire _r_uop_bypass_hits_rs3_T_6 = ren2_uops_0_ldst == next_uop_2_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_0_2 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs3_T_6; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_rs3_T_7 = ren2_uops_1_ldst == next_uop_2_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_1_2 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_rs3_T_7; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_rs3_T_8 = ren2_uops_2_ldst == next_uop_2_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87]
wire r_uop_bypass_hits_rs3_2_2 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_rs3_T_8; // @[rename-stage.scala:109:29, :179:{77,87}]
wire _r_uop_bypass_hits_dst_T_6 = ren2_uops_0_ldst == next_uop_2_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_0_2 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_dst_T_6; // @[rename-stage.scala:109:29, :180:{77,87}]
wire _r_uop_bypass_hits_dst_T_7 = ren2_uops_1_ldst == next_uop_2_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_1_2 = ren2_alloc_reqs_1 & _r_uop_bypass_hits_dst_T_7; // @[rename-stage.scala:109:29, :180:{77,87}]
wire _r_uop_bypass_hits_dst_T_8 = ren2_uops_2_ldst == next_uop_2_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87]
wire r_uop_bypass_hits_dst_2_2 = ren2_alloc_reqs_2 & _r_uop_bypass_hits_dst_T_8; // @[rename-stage.scala:109:29, :180:{77,87}]
wire [2:0] _r_uop_bypass_sel_rs1_enc_T_4 = {r_uop_bypass_hits_rs1_0_2, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs1_enc_T_5 = r_uop_bypass_hits_rs1_1_2 ? 3'h2 : _r_uop_bypass_sel_rs1_enc_T_4; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs1_enc_2 = r_uop_bypass_hits_rs1_2_2 ? 3'h1 : _r_uop_bypass_sel_rs1_enc_T_5; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs1_2_2 = r_uop_bypass_sel_rs1_enc_2[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs1_1_2 = r_uop_bypass_sel_rs1_enc_2[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs1_0_2 = r_uop_bypass_sel_rs1_enc_2[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_rs2_enc_T_4 = {r_uop_bypass_hits_rs2_0_2, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs2_enc_T_5 = r_uop_bypass_hits_rs2_1_2 ? 3'h2 : _r_uop_bypass_sel_rs2_enc_T_4; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs2_enc_2 = r_uop_bypass_hits_rs2_2_2 ? 3'h1 : _r_uop_bypass_sel_rs2_enc_T_5; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs2_2_2 = r_uop_bypass_sel_rs2_enc_2[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs2_1_2 = r_uop_bypass_sel_rs2_enc_2[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs2_0_2 = r_uop_bypass_sel_rs2_enc_2[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_rs3_enc_T_4 = {r_uop_bypass_hits_rs3_0_2, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_rs3_enc_T_5 = r_uop_bypass_hits_rs3_1_2 ? 3'h2 : _r_uop_bypass_sel_rs3_enc_T_4; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_rs3_enc_2 = r_uop_bypass_hits_rs3_2_2 ? 3'h1 : _r_uop_bypass_sel_rs3_enc_T_5; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_rs3_2_2 = r_uop_bypass_sel_rs3_enc_2[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs3_1_2 = r_uop_bypass_sel_rs3_enc_2[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_rs3_0_2 = r_uop_bypass_sel_rs3_enc_2[2]; // @[OneHot.scala:83:30]
wire [2:0] _r_uop_bypass_sel_dst_enc_T_4 = {r_uop_bypass_hits_dst_0_2, 2'h0}; // @[Mux.scala:50:70]
wire [2:0] _r_uop_bypass_sel_dst_enc_T_5 = r_uop_bypass_hits_dst_1_2 ? 3'h2 : _r_uop_bypass_sel_dst_enc_T_4; // @[Mux.scala:50:70]
wire [2:0] r_uop_bypass_sel_dst_enc_2 = r_uop_bypass_hits_dst_2_2 ? 3'h1 : _r_uop_bypass_sel_dst_enc_T_5; // @[Mux.scala:50:70]
wire r_uop_bypass_sel_dst_2_2 = r_uop_bypass_sel_dst_enc_2[0]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_dst_1_2 = r_uop_bypass_sel_dst_enc_2[1]; // @[OneHot.scala:83:30]
wire r_uop_bypass_sel_dst_0_2 = r_uop_bypass_sel_dst_enc_2[2]; // @[OneHot.scala:83:30]
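  // One-hot selects derived from the hit vectors; when several slots write the
  // same logical register, the highest-indexed hit takes priority.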
wire _r_uop_do_bypass_rs1_T_2 = r_uop_bypass_hits_rs1_0_2 | r_uop_bypass_hits_rs1_1_2; // @[rename-stage.scala:177:77, :187:49]
wire r_uop_do_bypass_rs1_2 = _r_uop_do_bypass_rs1_T_2 | r_uop_bypass_hits_rs1_2_2; // @[rename-stage.scala:177:77, :187:49]
wire _r_uop_do_bypass_rs2_T_2 = r_uop_bypass_hits_rs2_0_2 | r_uop_bypass_hits_rs2_1_2; // @[rename-stage.scala:178:77, :188:49]
wire r_uop_do_bypass_rs2_2 = _r_uop_do_bypass_rs2_T_2 | r_uop_bypass_hits_rs2_2_2; // @[rename-stage.scala:178:77, :188:49]
wire _r_uop_do_bypass_rs3_T_2 = r_uop_bypass_hits_rs3_0_2 | r_uop_bypass_hits_rs3_1_2; // @[rename-stage.scala:179:77, :189:49]
wire r_uop_do_bypass_rs3_2 = _r_uop_do_bypass_rs3_T_2 | r_uop_bypass_hits_rs3_2_2; // @[rename-stage.scala:179:77, :189:49]
wire _r_uop_do_bypass_dst_T_2 = r_uop_bypass_hits_dst_0_2 | r_uop_bypass_hits_dst_1_2; // @[rename-stage.scala:180:77, :190:49]
wire r_uop_do_bypass_dst_2 = _r_uop_do_bypass_dst_T_2 | r_uop_bypass_hits_dst_2_2; // @[rename-stage.scala:180:77, :190:49]
wire [6:0] _r_uop_bypassed_uop_prs1_T_10 = r_uop_bypass_sel_rs1_0_2 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_11 = r_uop_bypass_sel_rs1_1_2 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_12 = r_uop_bypass_sel_rs1_2_2 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs1_T_13 = _r_uop_bypassed_uop_prs1_T_10 | _r_uop_bypassed_uop_prs1_T_11; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs1_T_14 = _r_uop_bypassed_uop_prs1_T_13 | _r_uop_bypassed_uop_prs1_T_12; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs1_WIRE_2 = _r_uop_bypassed_uop_prs1_T_14; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_2_prs1 = r_uop_do_bypass_rs1_2 ? _r_uop_bypassed_uop_prs1_WIRE_2 : next_uop_2_prs1; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_T_10 = r_uop_bypass_sel_rs2_0_2 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_11 = r_uop_bypass_sel_rs2_1_2 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_12 = r_uop_bypass_sel_rs2_2_2 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs2_T_13 = _r_uop_bypassed_uop_prs2_T_10 | _r_uop_bypassed_uop_prs2_T_11; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_T_14 = _r_uop_bypassed_uop_prs2_T_13 | _r_uop_bypassed_uop_prs2_T_12; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs2_WIRE_2 = _r_uop_bypassed_uop_prs2_T_14; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_2_prs2 = r_uop_do_bypass_rs2_2 ? _r_uop_bypassed_uop_prs2_WIRE_2 : next_uop_2_prs2; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_T_10 = r_uop_bypass_sel_rs3_0_2 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_11 = r_uop_bypass_sel_rs3_1_2 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_12 = r_uop_bypass_sel_rs3_2_2 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_prs3_T_13 = _r_uop_bypassed_uop_prs3_T_10 | _r_uop_bypassed_uop_prs3_T_11; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_T_14 = _r_uop_bypassed_uop_prs3_T_13 | _r_uop_bypassed_uop_prs3_T_12; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_prs3_WIRE_2 = _r_uop_bypassed_uop_prs3_T_14; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_2_prs3 = r_uop_do_bypass_rs3_2 ? _r_uop_bypassed_uop_prs3_WIRE_2 : next_uop_2_prs3; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_10 = r_uop_bypass_sel_dst_0_2 ? ren2_uops_0_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_11 = r_uop_bypass_sel_dst_1_2 ? ren2_uops_1_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_12 = r_uop_bypass_sel_dst_2_2 ? ren2_uops_2_pdst : 7'h0; // @[OneHot.scala:83:30]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_13 = _r_uop_bypassed_uop_stale_pdst_T_10 | _r_uop_bypassed_uop_stale_pdst_T_11; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_T_14 = _r_uop_bypassed_uop_stale_pdst_T_13 | _r_uop_bypassed_uop_stale_pdst_T_12; // @[Mux.scala:30:73]
wire [6:0] _r_uop_bypassed_uop_stale_pdst_WIRE_2 = _r_uop_bypassed_uop_stale_pdst_T_14; // @[Mux.scala:30:73]
assign r_uop_bypassed_uop_2_stale_pdst = r_uop_do_bypass_dst_2 ? _r_uop_bypassed_uop_stale_pdst_WIRE_2 : next_uop_2_stale_pdst; // @[Mux.scala:30:73]
assign _r_uop_bypassed_uop_prs1_busy_T_2 = next_uop_2_prs1_busy | r_uop_do_bypass_rs1_2; // @[rename-stage.scala:123:24, :187:49, :199:45]
assign r_uop_bypassed_uop_2_prs1_busy = _r_uop_bypassed_uop_prs1_busy_T_2; // @[rename-stage.scala:174:28, :199:45]
assign _r_uop_bypassed_uop_prs2_busy_T_2 = next_uop_2_prs2_busy | r_uop_do_bypass_rs2_2; // @[rename-stage.scala:123:24, :188:49, :200:45]
assign r_uop_bypassed_uop_2_prs2_busy = _r_uop_bypassed_uop_prs2_busy_T_2; // @[rename-stage.scala:174:28, :200:45]
assign _r_uop_bypassed_uop_prs3_busy_T_2 = next_uop_2_prs3_busy | r_uop_do_bypass_rs3_2; // @[rename-stage.scala:123:24, :189:49, :201:45]
assign r_uop_bypassed_uop_2_prs3_busy = _r_uop_bypassed_uop_prs3_busy_T_2; // @[rename-stage.scala:174:28, :201:45]
wire [15:0] _r_uop_newuop_br_mask_T_5; // @[util.scala:74:35]
wire [15:0] r_uop_newuop_2_br_mask; // @[util.scala:73:26]
wire [15:0] _r_uop_newuop_br_mask_T_4 = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37]
assign _r_uop_newuop_br_mask_T_5 = r_uop_bypassed_uop_2_br_mask & _r_uop_newuop_br_mask_T_4; // @[util.scala:74:{35,37}]
assign r_uop_newuop_2_br_mask = _r_uop_newuop_br_mask_T_5; // @[util.scala:73:26, :74:35]
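  // Per-slot control: map-table allocation requests, branch-tag allocation
  // valids, and commit / rollback valids derived from the commit interface.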
wire _ren2_br_tags_0_valid_T_3; // @[rename-stage.scala:241:43]
wire _ren2_br_tags_1_valid_T_3; // @[rename-stage.scala:241:43]
wire _ren2_br_tags_2_valid_T_3; // @[rename-stage.scala:241:43]
wire ren2_br_tags_0_valid; // @[rename-stage.scala:233:29]
wire ren2_br_tags_1_valid; // @[rename-stage.scala:233:29]
wire ren2_br_tags_2_valid; // @[rename-stage.scala:233:29]
wire _com_valids_0_T_2; // @[rename-stage.scala:243:92]
wire _com_valids_1_T_2; // @[rename-stage.scala:243:92]
wire _com_valids_2_T_2; // @[rename-stage.scala:243:92]
wire com_valids_0; // @[rename-stage.scala:236:29]
wire com_valids_1; // @[rename-stage.scala:236:29]
wire com_valids_2; // @[rename-stage.scala:236:29]
wire _rbk_valids_0_T_2; // @[rename-stage.scala:244:92]
wire _rbk_valids_1_T_2; // @[rename-stage.scala:244:92]
wire _rbk_valids_2_T_2; // @[rename-stage.scala:244:92]
wire rbk_valids_0; // @[rename-stage.scala:237:29]
wire rbk_valids_1; // @[rename-stage.scala:237:29]
wire rbk_valids_2; // @[rename-stage.scala:237:29]
wire _GEN_0 = ren2_uops_0_dst_rtype == 2'h1; // @[rename-stage.scala:108:29, :240:78]
wire _ren2_alloc_reqs_0_T; // @[rename-stage.scala:240:78]
assign _ren2_alloc_reqs_0_T = _GEN_0; // @[rename-stage.scala:240:78]
wire _io_ren_stalls_0_T; // @[rename-stage.scala:339:49]
assign _io_ren_stalls_0_T = _GEN_0; // @[rename-stage.scala:240:78, :339:49]
wire _ren2_alloc_reqs_0_T_1 = ren2_uops_0_ldst_val & _ren2_alloc_reqs_0_T; // @[rename-stage.scala:108:29, :240:{52,78}]
assign _ren2_alloc_reqs_0_T_2 = _ren2_alloc_reqs_0_T_1 & io_dis_fire_0_0; // @[rename-stage.scala:160:7, :240:{52,88}]
assign ren2_alloc_reqs_0 = _ren2_alloc_reqs_0_T_2; // @[rename-stage.scala:109:29, :240:88]
wire _ren2_br_tags_0_valid_T = ~ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29]
wire _ren2_br_tags_0_valid_T_1 = ren2_uops_0_is_br & _ren2_br_tags_0_valid_T; // @[rename-stage.scala:108:29]
wire _ren2_br_tags_0_valid_T_2 = _ren2_br_tags_0_valid_T_1 | ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29]
assign _ren2_br_tags_0_valid_T_3 = io_dis_fire_0_0 & _ren2_br_tags_0_valid_T_2; // @[rename-stage.scala:160:7, :241:43]
assign ren2_br_tags_0_valid = _ren2_br_tags_0_valid_T_3; // @[rename-stage.scala:233:29, :241:43]
wire _GEN_1 = io_com_uops_0_dst_rtype_0 == 2'h1; // @[rename-stage.scala:160:7, :243:82]
wire _com_valids_0_T; // @[rename-stage.scala:243:82]
assign _com_valids_0_T = _GEN_1; // @[rename-stage.scala:243:82]
wire _rbk_valids_0_T; // @[rename-stage.scala:244:82]
assign _rbk_valids_0_T = _GEN_1; // @[rename-stage.scala:243:82, :244:82]
wire _com_valids_0_T_1 = io_com_uops_0_ldst_val_0 & _com_valids_0_T; // @[rename-stage.scala:160:7, :243:{54,82}]
assign _com_valids_0_T_2 = _com_valids_0_T_1 & io_com_valids_0_0; // @[rename-stage.scala:160:7, :243:{54,92}]
assign com_valids_0 = _com_valids_0_T_2; // @[rename-stage.scala:236:29, :243:92]
wire _rbk_valids_0_T_1 = io_com_uops_0_ldst_val_0 & _rbk_valids_0_T; // @[rename-stage.scala:160:7, :244:{54,82}]
assign _rbk_valids_0_T_2 = _rbk_valids_0_T_1 & io_rbk_valids_0_0; // @[rename-stage.scala:160:7, :244:{54,92}]
assign rbk_valids_0 = _rbk_valids_0_T_2; // @[rename-stage.scala:237:29, :244:92]
wire _GEN_2 = ren2_uops_1_dst_rtype == 2'h1; // @[rename-stage.scala:108:29, :240:78]
wire _ren2_alloc_reqs_1_T; // @[rename-stage.scala:240:78]
assign _ren2_alloc_reqs_1_T = _GEN_2; // @[rename-stage.scala:240:78]
wire _io_ren_stalls_1_T; // @[rename-stage.scala:339:49]
assign _io_ren_stalls_1_T = _GEN_2; // @[rename-stage.scala:240:78, :339:49]
wire _ren2_alloc_reqs_1_T_1 = ren2_uops_1_ldst_val & _ren2_alloc_reqs_1_T; // @[rename-stage.scala:108:29, :240:{52,78}]
assign _ren2_alloc_reqs_1_T_2 = _ren2_alloc_reqs_1_T_1 & io_dis_fire_1_0; // @[rename-stage.scala:160:7, :240:{52,88}]
assign ren2_alloc_reqs_1 = _ren2_alloc_reqs_1_T_2; // @[rename-stage.scala:109:29, :240:88]
wire _ren2_br_tags_1_valid_T = ~ren2_uops_1_is_sfb; // @[rename-stage.scala:108:29]
wire _ren2_br_tags_1_valid_T_1 = ren2_uops_1_is_br & _ren2_br_tags_1_valid_T; // @[rename-stage.scala:108:29]
wire _ren2_br_tags_1_valid_T_2 = _ren2_br_tags_1_valid_T_1 | ren2_uops_1_is_jalr; // @[rename-stage.scala:108:29]
assign _ren2_br_tags_1_valid_T_3 = io_dis_fire_1_0 & _ren2_br_tags_1_valid_T_2; // @[rename-stage.scala:160:7, :241:43]
assign ren2_br_tags_1_valid = _ren2_br_tags_1_valid_T_3; // @[rename-stage.scala:233:29, :241:43]
wire _GEN_3 = io_com_uops_1_dst_rtype_0 == 2'h1; // @[rename-stage.scala:160:7, :243:82]
wire _com_valids_1_T; // @[rename-stage.scala:243:82]
assign _com_valids_1_T = _GEN_3; // @[rename-stage.scala:243:82]
wire _rbk_valids_1_T; // @[rename-stage.scala:244:82]
assign _rbk_valids_1_T = _GEN_3; // @[rename-stage.scala:243:82, :244:82]
wire _com_valids_1_T_1 = io_com_uops_1_ldst_val_0 & _com_valids_1_T; // @[rename-stage.scala:160:7, :243:{54,82}]
assign _com_valids_1_T_2 = _com_valids_1_T_1 & io_com_valids_1_0; // @[rename-stage.scala:160:7, :243:{54,92}]
assign com_valids_1 = _com_valids_1_T_2; // @[rename-stage.scala:236:29, :243:92]
wire _rbk_valids_1_T_1 = io_com_uops_1_ldst_val_0 & _rbk_valids_1_T; // @[rename-stage.scala:160:7, :244:{54,82}]
assign _rbk_valids_1_T_2 = _rbk_valids_1_T_1 & io_rbk_valids_1_0; // @[rename-stage.scala:160:7, :244:{54,92}]
assign rbk_valids_1 = _rbk_valids_1_T_2; // @[rename-stage.scala:237:29, :244:92]
wire _GEN_4 = ren2_uops_2_dst_rtype == 2'h1; // @[rename-stage.scala:108:29, :240:78]
wire _ren2_alloc_reqs_2_T; // @[rename-stage.scala:240:78]
assign _ren2_alloc_reqs_2_T = _GEN_4; // @[rename-stage.scala:240:78]
wire _io_ren_stalls_2_T; // @[rename-stage.scala:339:49]
assign _io_ren_stalls_2_T = _GEN_4; // @[rename-stage.scala:240:78, :339:49]
wire _ren2_alloc_reqs_2_T_1 = ren2_uops_2_ldst_val & _ren2_alloc_reqs_2_T; // @[rename-stage.scala:108:29, :240:{52,78}]
assign _ren2_alloc_reqs_2_T_2 = _ren2_alloc_reqs_2_T_1 & io_dis_fire_2_0; // @[rename-stage.scala:160:7, :240:{52,88}]
assign ren2_alloc_reqs_2 = _ren2_alloc_reqs_2_T_2; // @[rename-stage.scala:109:29, :240:88]
wire _ren2_br_tags_2_valid_T = ~ren2_uops_2_is_sfb; // @[rename-stage.scala:108:29]
wire _ren2_br_tags_2_valid_T_1 = ren2_uops_2_is_br & _ren2_br_tags_2_valid_T; // @[rename-stage.scala:108:29]
wire _ren2_br_tags_2_valid_T_2 = _ren2_br_tags_2_valid_T_1 | ren2_uops_2_is_jalr; // @[rename-stage.scala:108:29]
assign _ren2_br_tags_2_valid_T_3 = io_dis_fire_2_0 & _ren2_br_tags_2_valid_T_2; // @[rename-stage.scala:160:7, :241:43]
assign ren2_br_tags_2_valid = _ren2_br_tags_2_valid_T_3; // @[rename-stage.scala:233:29, :241:43]
wire _GEN_5 = io_com_uops_2_dst_rtype_0 == 2'h1; // @[rename-stage.scala:160:7, :243:82]
wire _com_valids_2_T; // @[rename-stage.scala:243:82]
assign _com_valids_2_T = _GEN_5; // @[rename-stage.scala:243:82]
wire _rbk_valids_2_T; // @[rename-stage.scala:244:82]
assign _rbk_valids_2_T = _GEN_5; // @[rename-stage.scala:243:82, :244:82]
wire _com_valids_2_T_1 = io_com_uops_2_ldst_val_0 & _com_valids_2_T; // @[rename-stage.scala:160:7, :243:{54,82}]
assign _com_valids_2_T_2 = _com_valids_2_T_1 & io_com_valids_2_0; // @[rename-stage.scala:160:7, :243:{54,92}]
assign com_valids_2 = _com_valids_2_T_2; // @[rename-stage.scala:236:29, :243:92]
wire _rbk_valids_2_T_1 = io_com_uops_2_ldst_val_0 & _rbk_valids_2_T; // @[rename-stage.scala:160:7, :244:{54,82}]
assign _rbk_valids_2_T_2 = _rbk_valids_2_T_1 & io_rbk_valids_2_0; // @[rename-stage.scala:160:7, :244:{54,92}]
assign rbk_valids_2 = _rbk_valids_2_T_2; // @[rename-stage.scala:237:29, :244:92]
wire [5:0] _remap_reqs_0_ldst_T; // @[rename-stage.scala:262:30]
wire [6:0] _remap_reqs_0_pdst_T; // @[rename-stage.scala:263:30]
wire _remap_reqs_0_valid_T; // @[rename-stage.scala:266:38]
wire [5:0] _remap_reqs_1_ldst_T; // @[rename-stage.scala:262:30]
wire [6:0] _remap_reqs_1_pdst_T; // @[rename-stage.scala:263:30]
wire _remap_reqs_1_valid_T; // @[rename-stage.scala:266:38]
wire [5:0] _remap_reqs_2_ldst_T; // @[rename-stage.scala:262:30]
wire [6:0] _remap_reqs_2_pdst_T; // @[rename-stage.scala:263:30]
wire _remap_reqs_2_valid_T; // @[rename-stage.scala:266:38]
wire [5:0] remap_reqs_0_ldst; // @[rename-stage.scala:253:24]
wire [6:0] remap_reqs_0_pdst; // @[rename-stage.scala:253:24]
wire remap_reqs_0_valid; // @[rename-stage.scala:253:24]
wire [5:0] remap_reqs_1_ldst; // @[rename-stage.scala:253:24]
wire [6:0] remap_reqs_1_pdst; // @[rename-stage.scala:253:24]
wire remap_reqs_1_valid; // @[rename-stage.scala:253:24]
wire [5:0] remap_reqs_2_ldst; // @[rename-stage.scala:253:24]
wire [6:0] remap_reqs_2_pdst; // @[rename-stage.scala:253:24]
wire remap_reqs_2_valid; // @[rename-stage.scala:253:24]
assign _remap_reqs_0_ldst_T = io_rollback_0 ? io_com_uops_2_ldst_0 : ren2_uops_0_ldst; // @[rename-stage.scala:108:29, :160:7, :262:30]
assign remap_reqs_0_ldst = _remap_reqs_0_ldst_T; // @[rename-stage.scala:253:24, :262:30]
assign _remap_reqs_0_pdst_T = io_rollback_0 ? io_com_uops_2_stale_pdst_0 : ren2_uops_0_pdst; // @[rename-stage.scala:108:29, :160:7, :263:30]
assign remap_reqs_0_pdst = _remap_reqs_0_pdst_T; // @[rename-stage.scala:253:24, :263:30]
assign _remap_reqs_1_ldst_T = io_rollback_0 ? io_com_uops_1_ldst_0 : ren2_uops_1_ldst; // @[rename-stage.scala:108:29, :160:7, :262:30]
assign remap_reqs_1_ldst = _remap_reqs_1_ldst_T; // @[rename-stage.scala:253:24, :262:30]
assign _remap_reqs_1_pdst_T = io_rollback_0 ? io_com_uops_1_stale_pdst_0 : ren2_uops_1_pdst; // @[rename-stage.scala:108:29, :160:7, :263:30]
assign remap_reqs_1_pdst = _remap_reqs_1_pdst_T; // @[rename-stage.scala:253:24, :263:30]
assign _remap_reqs_2_ldst_T = io_rollback_0 ? io_com_uops_0_ldst_0 : ren2_uops_2_ldst; // @[rename-stage.scala:108:29, :160:7, :262:30]
assign remap_reqs_2_ldst = _remap_reqs_2_ldst_T; // @[rename-stage.scala:253:24, :262:30]
assign _remap_reqs_2_pdst_T = io_rollback_0 ? io_com_uops_0_stale_pdst_0 : ren2_uops_2_pdst; // @[rename-stage.scala:108:29, :160:7, :263:30]
assign remap_reqs_2_pdst = _remap_reqs_2_pdst_T; // @[rename-stage.scala:253:24, :263:30]
assign _remap_reqs_0_valid_T = ren2_alloc_reqs_0 | rbk_valids_2; // @[rename-stage.scala:109:29, :237:29, :266:38]
assign remap_reqs_0_valid = _remap_reqs_0_valid_T; // @[rename-stage.scala:253:24, :266:38]
assign _remap_reqs_1_valid_T = ren2_alloc_reqs_1 | rbk_valids_1; // @[rename-stage.scala:109:29, :237:29, :266:38]
assign remap_reqs_1_valid = _remap_reqs_1_valid_T; // @[rename-stage.scala:253:24, :266:38]
assign _remap_reqs_2_valid_T = ren2_alloc_reqs_2 | rbk_valids_0; // @[rename-stage.scala:109:29, :237:29, :266:38]
assign remap_reqs_2_valid = _remap_reqs_2_valid_T; // @[rename-stage.scala:253:24, :266:38]
wire _freelist_io_dealloc_pregs_0_valid_T = com_valids_0 | rbk_valids_0; // @[rename-stage.scala:236:29, :237:29, :293:37]
wire _freelist_io_dealloc_pregs_1_valid_T = com_valids_1 | rbk_valids_1; // @[rename-stage.scala:236:29, :237:29, :293:37]
wire _freelist_io_dealloc_pregs_2_valid_T = com_valids_2 | rbk_valids_2; // @[rename-stage.scala:236:29, :237:29, :293:37]
wire [6:0] _freelist_io_dealloc_pregs_0_bits_T = io_rollback_0 ? io_com_uops_0_pdst_0 : io_com_uops_0_stale_pdst_0; // @[rename-stage.scala:160:7, :295:33]
wire [6:0] _freelist_io_dealloc_pregs_1_bits_T = io_rollback_0 ? io_com_uops_1_pdst_0 : io_com_uops_1_stale_pdst_0; // @[rename-stage.scala:160:7, :295:33]
wire [6:0] _freelist_io_dealloc_pregs_2_bits_T = io_rollback_0 ? io_com_uops_2_pdst_0 : io_com_uops_2_stale_pdst_0; // @[rename-stage.scala:160:7, :295:33]
wire _ren2_uops_0_pdst_T = |ren2_uops_0_ldst; // @[rename-stage.scala:108:29, :306:30]
assign ren2_uops_0_pdst = _ren2_uops_0_pdst_T_2; // @[rename-stage.scala:108:29, :306:20]
wire _ren2_uops_1_pdst_T = |ren2_uops_1_ldst; // @[rename-stage.scala:108:29, :306:30]
assign ren2_uops_1_pdst = _ren2_uops_1_pdst_T_2; // @[rename-stage.scala:108:29, :306:20]
wire _ren2_uops_2_pdst_T = |ren2_uops_2_ldst; // @[rename-stage.scala:108:29, :306:30]
assign ren2_uops_2_pdst = _ren2_uops_2_pdst_T_2; // @[rename-stage.scala:108:29, :306:20] |
Generate the Verilog code corresponding to the following Chisel files.
File PMA.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.devices.debug.DebugModuleKey
import freechips.rocketchip.diplomacy.RegionType
import freechips.rocketchip.subsystem.CacheBlockBytes
import freechips.rocketchip.tile.{CoreModule, CoreBundle}
import freechips.rocketchip.tilelink.{TLSlavePortParameters, TLManagerParameters}
class PMAChecker(manager: TLSlavePortParameters)(implicit p: Parameters) extends CoreModule()(p) {
val io = IO(new Bundle {
val paddr = Input(UInt())
val resp = Output(new Bundle {
val cacheable = Bool()
val r = Bool()
val w = Bool()
val pp = Bool()
val al = Bool()
val aa = Bool()
val x = Bool()
val eff = Bool()
})
})
// PMA
// check whether there exists a slave that can consume this address.
val legal_address = manager.findSafe(io.paddr).reduce(_||_)
// helper used below to check SoC properties at this address.
def fastCheck(member: TLManagerParameters => Boolean) =
legal_address && manager.fastProperty(io.paddr, member, (b:Boolean) => b.B)
io.resp.cacheable := fastCheck(_.supportsAcquireB)
io.resp.r := fastCheck(_.supportsGet)
io.resp.w := fastCheck(_.supportsPutFull)
io.resp.pp := fastCheck(_.supportsPutPartial)
io.resp.al := fastCheck(_.supportsLogical)
io.resp.aa := fastCheck(_.supportsArithmetic)
io.resp.x := fastCheck(_.executable)
io.resp.eff := fastCheck(Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains _.regionType)
}
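// --- Editor's sketch (not in the upstream file): hypothetical use of PMAChecker.
// `edge` and `s2_paddr` are invented names for this illustration; a core that
// already has a TileLink slave-port parameter object in scope could wire it as:
//
//   val pma = Module(new PMAChecker(edge.manager))
//   pma.io.paddr := s2_paddr
//   val s2_cacheable = pma.io.resp.cacheable // true only for AcquireB-capable regions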
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty (start == end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
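// --- Editor's illustration (not in the upstream file): a minimal, elaboration-time
// sketch of IdRange using only the methods defined above; the names are invented.
// For the hardware `contains(x: UInt)` above, e.g. IdRange(4, 6): start ^ (end-1)
// = 4 ^ 5 = 1, so largestDeltaBit = 0 and smallestCommonBit = 1, and the check
// reduces to (x >> 1) === 2.U plus a (trivially true) range test on bit 0.
object IdRangeExampleSketch {
  val cpu = IdRange(0, 4)                  // ids 0,1,2,3
  val dma = IdRange(4, 6)                  // ids 4,5
  require(!(cpu overlaps dma))             // [0,4) and [4,6) are disjoint
  require(IdRange(0, 8) contains dma)      // [4,6) lies inside [0,8)
  require(cpu.shift(8) == IdRange(8, 12))  // shifting moves both endpoints
}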
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
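// --- Editor's illustration (not in the upstream file): TransferSizes is an inclusive
// range of power-of-two sizes in bytes; the names below are invented for the example.
object TransferSizesExampleSketch {
  val a = TransferSizes(4, 64)
  val b = TransferSizes(16, 256)
  require(a contains 32)                             // 32 is a power of two within [4, 64]
  require(!(a contains 48))                          // 48 is not a power of two
  require((a intersect b) == TransferSizes(16, 64))  // overlap of the two ranges
  require((a mincover b) == TransferSizes(4, 256))   // smallest range covering both
}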
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
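// --- Editor's illustration (not in the upstream file): base/mask semantics, matching
// the examples in the comment above; the names are invented.
object AddressSetExampleSketch {
  val dev = AddressSet(0x200, 0xff)          // covers 0x200 - 0x2ff
  require(dev contains BigInt(0x2ab))
  require(!(dev contains BigInt(0x300)))
  require(dev.alignment == BigInt(0x100))    // the device must be 256-byte aligned
  require(dev.contiguous)
  val striped = AddressSet(0x1000, 0xf0f)    // 0x1000-0x100f, 0x1100-0x110f, ...
  require(!striped.contiguous)               // the set has holes, so it is not contiguous
  require(striped.alignment == BigInt(0x10)) // each fragment is 16 bytes
}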
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
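// --- Editor's illustration (not in the upstream file): latency only accrues when a
// real, non-flow-through buffer is inserted, per the `latency` definition above.
object BufferParamsExampleSketch {
  require(BufferParams.default.latency == 1) // depth-2, non-flow buffer adds a cycle
  require(BufferParams.flow.latency == 0)    // flow-through: no added latency
  require(BufferParams.none.latency == 0)    // no buffer at all
}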
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
| module PMAChecker( // @[PMA.scala:18:7]
input [39:0] io_paddr, // @[PMA.scala:19:14]
output io_resp_cacheable, // @[PMA.scala:19:14]
output io_resp_r, // @[PMA.scala:19:14]
output io_resp_w, // @[PMA.scala:19:14]
output io_resp_pp, // @[PMA.scala:19:14]
output io_resp_al, // @[PMA.scala:19:14]
output io_resp_aa, // @[PMA.scala:19:14]
output io_resp_x, // @[PMA.scala:19:14]
output io_resp_eff // @[PMA.scala:19:14]
);
wire [5:0] _GEN = io_paddr[13:8] ^ 6'h20; // @[Parameters.scala:137:31]
wire [8:0] _GEN_0 = io_paddr[20:12] ^ 9'h100; // @[Parameters.scala:137:31]
wire [9:0] _GEN_1 = io_paddr[25:16] ^ 10'h200; // @[Parameters.scala:137:31]
wire [13:0] _GEN_2 = io_paddr[25:12] ^ 14'h2010; // @[Parameters.scala:137:31]
wire [11:0] _GEN_3 = io_paddr[27:16] ^ 12'h800; // @[Parameters.scala:137:31]
wire [3:0] _GEN_4 = io_paddr[31:28] ^ 4'h8; // @[Parameters.scala:137:31]
wire legal_address = io_paddr[39:12] == 28'h0 | {io_paddr[39:13], ~(io_paddr[12])} == 28'h0 | {io_paddr[39:14], _GEN} == 32'h0 | {io_paddr[39:14], io_paddr[13:8] ^ 6'h21} == 32'h0 | {io_paddr[39:14], io_paddr[13:8] ^ 6'h22} == 32'h0 | {io_paddr[39:14], io_paddr[13:8] ^ 6'h23} == 32'h0 | {io_paddr[39:14], io_paddr[13:8] ^ 6'h24} == 32'h0 | {io_paddr[39:14], ~(io_paddr[13:12])} == 28'h0 | {io_paddr[39:17], ~(io_paddr[16])} == 24'h0 | {io_paddr[39:21], _GEN_0} == 28'h0 | {io_paddr[39:21], io_paddr[20:12] ^ 9'h110} == 28'h0 | {io_paddr[39:26], _GEN_1} == 24'h0 | {io_paddr[39:26], _GEN_2} == 28'h0 | {io_paddr[39:28], _GEN_3} == 24'h0 | {io_paddr[39:28], ~(io_paddr[27:26])} == 14'h0 | {io_paddr[39:29], io_paddr[28:12] ^ 17'h10020} == 28'h0 | {io_paddr[39:32], _GEN_4} == 12'h0; // @[PMA.scala:19:14, :36:58]
wire [3:0] _GEN_5 = {io_paddr[31], io_paddr[27], io_paddr[20], io_paddr[16]}; // @[Parameters.scala:137:{31,41,46}]
wire [4:0] _GEN_6 = {io_paddr[31], io_paddr[27], io_paddr[25], _GEN_0[8], io_paddr[13]}; // @[Parameters.scala:137:{31,41,46}]
wire [5:0] _GEN_7 = {io_paddr[31], io_paddr[27], _GEN_2[13], io_paddr[20], _GEN_2[4], io_paddr[13]}; // @[Parameters.scala:137:{31,41,46}]
wire [1:0] _GEN_8 = {io_paddr[31], _GEN_3[11]}; // @[Parameters.scala:137:{31,41,46}]
wire [4:0] _GEN_9 = {io_paddr[31], _GEN_3[11], io_paddr[25], io_paddr[20], io_paddr[16]}; // @[Parameters.scala:137:{31,41,46}]
assign io_resp_cacheable = legal_address & ({io_paddr[31], _GEN_3[11:10]} == 3'h0 | ~(_GEN_4[3])); // @[Parameters.scala:629:89]
assign io_resp_r = legal_address; // @[PMA.scala:18:7, :36:58]
assign io_resp_w = legal_address & (~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7) | ~(|_GEN_8) | ~(|_GEN_9) | ~(_GEN_4[3])); // @[Parameters.scala:629:89]
assign io_resp_pp = legal_address & (~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7) | ~(|_GEN_8) | ~(|_GEN_9) | ~(_GEN_4[3])); // @[Parameters.scala:629:89]
assign io_resp_al = legal_address & (~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7) | ~(|_GEN_8) | ~(|_GEN_9) | ~(_GEN_4[3])); // @[Parameters.scala:629:89]
assign io_resp_aa = legal_address & (~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7) | ~(|_GEN_8) | ~(|_GEN_9) | ~(_GEN_4[3])); // @[Parameters.scala:629:89]
assign io_resp_x = legal_address & ({io_paddr[31], io_paddr[28:25], io_paddr[20], io_paddr[16], io_paddr[13:12]} == 9'h0 | {io_paddr[31], io_paddr[28:25], io_paddr[20], io_paddr[16], ~(io_paddr[13:12])} == 9'h0 | {io_paddr[31], io_paddr[28:25], io_paddr[20], ~(io_paddr[16])} == 7'h0 | {io_paddr[31], io_paddr[28], _GEN_3[11:9], io_paddr[20], io_paddr[16]} == 7'h0 | {_GEN_4[3], io_paddr[28]} == 2'h0); // @[Parameters.scala:629:89]
assign io_resp_eff = legal_address & ({io_paddr[31], io_paddr[27:25], io_paddr[20], io_paddr[16], io_paddr[13]} == 7'h0 | {io_paddr[31], io_paddr[27:25], io_paddr[20], io_paddr[16], _GEN[5:4]} == 8'h0 | {io_paddr[31], io_paddr[27:25], _GEN_0[8], io_paddr[13:12]} == 7'h0 | {io_paddr[31], io_paddr[27:26], _GEN_1[9], io_paddr[20], io_paddr[16]} == 6'h0 | {io_paddr[31], io_paddr[27:26], _GEN_2[13], io_paddr[20], _GEN_2[4], io_paddr[13:12]} == 8'h0 | {io_paddr[31], ~(io_paddr[27:26])} == 3'h0); // @[Parameters.scala:629:89]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
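// --- Editor's note (not in the upstream file): for the generated module below, the
// input and output are both 33-bit recoded single-precision values (expWidth = 8,
// sigWidth = 24 on both sides), so the first branch above applies: the value passes
// through unshifted and only the invalid flag can be raised, and only for a
// signaling-NaN input.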
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
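// --- Editor's note (not in the upstream file): worked example for the 33-bit recoded
// single-precision value consumed by the module below (expWidth = 8, sigWidth = 24).
// Here `exp` is in(31,23), a 9-bit field:
// * isZero when exp(8,6) === 0 (top three exponent bits all clear)
// * isSpecial when exp(8,7) === 3; exp(6) then selects NaN (1) versus Inf (0)
// * sig is rebuilt as 0 ## !isZero ## in(22,0), making the hidden bit explicit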
| module RecFNToRecFN_106( // @[RecFNToRecFN.scala:44:5]
input [32:0] io_in, // @[RecFNToRecFN.scala:48:16]
output [32:0] io_out // @[RecFNToRecFN.scala:48:16]
);
wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35]
wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54]
wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35]
wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}]
wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46]
assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
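// --- Editor's note (not in the upstream file): because each CacheState captures and
// then increments the shared CacheState.index, the codes follow declaration order:
// S_INVALID = 0, S_BRANCH = 1, ..., S_TRUNK_CD = 8. The eviction/transition/bypass
// helpers below compare directly against these codes.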
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
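// Editor's note (not in the upstream file): no_wait is true once every response this
// MSHR still awaits (probe acks, release ack, grant, grant ack) has arrived, which is
// what permits the final directory writeback and the reload of a new request below.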
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request he may be N.
// Therefore, we must consult our own meta-data state to confirm he owns the line still.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
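// (Added note: under this ordering CACHED is the greatest, i.e. most relaxed,
// element and GET_EFFECTS the least, so reducing a collection of region types
// with `min` yields the most restrictive attribute.)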
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range [start, end); may be empty (start == end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
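// Illustrative sketch (added; not part of the upstream file, object name is
// hypothetical): half-open [start, end) ranges tile an ID space without
// overlapping, and `end` itself is excluded.
object IdRangeSketch extends App {
  val a = IdRange(0, 4)                     // ids 0,1,2,3
  val b = a.shift(4)                        // ids 4,5,6,7
  assert(a.size == 4 && !a.isEmpty)
  assert(a.contains(3) && !a.contains(4))   // end is exclusive
  assert(!a.overlaps(b) && IdRange.overlaps(Seq(a, b)).isEmpty)
}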
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
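// Illustrative sketch (added; not upstream, object name is hypothetical): how
// TransferSizes combine. intersect keeps only the sizes both sides support,
// while mincover is the smallest power-of-two span covering both and may admit
// sizes neither term supports, as the comment above warns.
object TransferSizesSketch extends App {
  val a = TransferSizes(4, 64)
  val b = TransferSizes(16, 256)
  assert((a intersect b) == TransferSizes(16, 64))
  assert((a mincover b) == TransferSizes(4, 256))
  assert(TransferSizes.none.none)           // (0, 0) means "no transfers supported"
}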
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
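// Illustrative sketch (added; not upstream, object name is hypothetical):
// base/mask semantics and two common manipulations. AddressSet(0x1000, 0xfff)
// is the contiguous 4 KiB block at 0x1000; subtract punches a hole and returns
// the remainder as aligned pieces; misaligned turns a (base, size) pair into a
// list of legal power-of-two sets.
object AddressSetSketch extends App {
  val block = AddressSet(0x1000, 0xfff)
  assert(block.contains(BigInt(0x1abc)) && !block.contains(BigInt(0x2000)))
  assert(block.contiguous && block.alignment == 0x1000)
  assert(block.subtract(AddressSet(0x1800, 0x7ff)) == Seq(AddressSet(0x1000, 0x7ff)))
  assert(AddressSet.misaligned(0x1000, 0x3000) ==
    Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff)))
}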
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
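// Illustrative sketch (added; not upstream, object name is hypothetical): the
// standard buffer flavours and their forward latency. flow=true lets a beat
// pass through combinationally, so it adds no cycle of latency; pipe=true only
// relaxes back-pressure and leaves the latency at one cycle.
object BufferParamsSketch extends App {
  assert(BufferParams.default.latency == 1) // 2-deep, neither flow nor pipe
  assert(BufferParams.flow.latency == 0)    // flow-through queue
  assert(BufferParams.none.latency == 0)    // no buffer at all
  val b: BufferParams = 1                   // implicit Int => BufferParams(1, false, false)
  assert(b.isDefined && b.latency == 1)
}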
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
module MSHR_2( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_0, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [9:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [2:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
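// Orientation comment (added; not emitted by firtool): everything below is the
// generated implementation of the MSHR source above. The s_* / w_* one-bit
// registers mirror the schedule/wait flags from the Chisel code, and the
// final_meta_writeback_* wires are the combinational next value of the
// directory entry that io_schedule_bits_dir carries out.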
wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _req_clientBit_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _probe_bit_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _new_clientBit_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [5:0] _probe_bit_uncommonBits_T = io_sinkc_bits_source_0; // @[Parameters.scala:52:29]
wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_0; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
wire [5:0] _req_clientBit_uncommonBits_T = request_source; // @[Parameters.scala:52:29]
reg [12:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [9:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
reg [12:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [2:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
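// Schedule (s_*) and wait (w_*) bookkeeping registers (descriptive comment
// added): a cleared s_* bit means that action still has to be issued, a
// cleared w_* bit means that response is still outstanding; io_schedule_*_valid
// and no_wait below are derived from them.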
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire [2:0] req_clientBit_uncommonBits = _req_clientBit_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _req_clientBit_T = request_source[5:3]; // @[Parameters.scala:54:10]
wire _req_clientBit_T_1 = _req_clientBit_T == 3'h4; // @[Parameters.scala:54:{10,32}]
wire _req_clientBit_T_3 = _req_clientBit_T_1; // @[Parameters.scala:54:{32,67}]
wire _req_clientBit_T_4 = req_clientBit_uncommonBits < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire req_clientBit = _req_clientBit_T_3 & _req_clientBit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:56:48]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:56:48]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
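// final_meta_writeback_*: next directory entry, selected by requester type
// (C-channel release, X-channel flush, A-channel acquire) and by bad_grant,
// per the @[MSHR.scala:223-264] source annotations (comment added).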
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:56:48]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106]
wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:56:48]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire [2:0] probe_bit_uncommonBits = _probe_bit_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _probe_bit_T = io_sinkc_bits_source_0[5:3]; // @[Parameters.scala:54:10]
wire _probe_bit_T_1 = _probe_bit_T == 3'h4; // @[Parameters.scala:54:{10,32}]
wire _probe_bit_T_3 = _probe_bit_T_1; // @[Parameters.scala:54:{32,67}]
wire _probe_bit_T_4 = probe_bit_uncommonBits < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire probe_bit = _probe_bit_T_3 & _probe_bit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:56:48]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66]
wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:56:48]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] _new_clientBit_uncommonBits_T = new_request_source; // @[Parameters.scala:52:29]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire [2:0] new_clientBit_uncommonBits = _new_clientBit_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _new_clientBit_T = new_request_source[5:3]; // @[Parameters.scala:54:10]
wire _new_clientBit_T_1 = _new_clientBit_T == 3'h4; // @[Parameters.scala:54:{10,32}]
wire _new_clientBit_T_3 = _new_clientBit_T_1; // @[Parameters.scala:54:{32,67}]
wire _new_clientBit_T_4 = new_clientBit_uncommonBits < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire new_clientBit = _new_clientBit_T_3 & _new_clientBit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:56:48]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
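// Illustrative usage sketch, not part of the original Buffer.scala: it shows how the
// factory methods above (TLBuffer() and TLBuffer.chainNode) compose into a diplomatic
// graph, mirroring the TLRAMWidthWidget unit-test pattern that appears later in this
// document. The class name is hypothetical; TLFuzzer, TLRAM, AddressSet and
// UnitTestModule are assumed to come from the usual rocketchip packages.
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.unittest._

class TLRAMBufferChainExample(txns: Int)(implicit p: Parameters) extends LazyModule {
  val fuzz = LazyModule(new TLFuzzer(txns))
  val ram  = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
  // Two default buffers on the single edge, collapsed into one bindable node:
  (ram.node
    := TLBuffer.chainNode(2, Some("example_buf"))
    := fuzz.node)
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) with UnitTestModule {
    io.finished := fuzz.module.io.finished
  }
}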
File Manager.scala:
package rerocc.manager
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.prci._
import freechips.rocketchip.subsystem._
import rerocc.bus._
case class ReRoCCManagerParams(
managerId: Int,
)
case object ReRoCCManagerControlAddress extends Field[BigInt](0x20000)
// For local PTW
class MiniDCache(reRoCCId: Int, crossing: ClockCrossingType)(implicit p: Parameters) extends DCache(0, crossing)(p) {
override def cacheClientParameters = Seq(TLMasterParameters.v1(
name = s"ReRoCC ${reRoCCId} DCache",
sourceId = IdRange(0, 1),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes)))
override def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"ReRoCC ${reRoCCId} DCache MMIO",
sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs),
requestFifo = true))
}
class ReRoCCManager(reRoCCTileParams: ReRoCCTileParams, roccOpcode: UInt)(implicit p: Parameters) extends LazyModule {
val node = ReRoCCManagerNode(ReRoCCManagerParams(reRoCCTileParams.reroccId))
val ibufEntries = p(ReRoCCIBufEntriesKey)
override lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val manager_id = Input(UInt(log2Ceil(p(ReRoCCTileKey).size).W))
val cmd = Decoupled(new RoCCCommand)
val resp = Flipped(Decoupled(new RoCCResponse))
val busy = Input(Bool())
val ptw = Flipped(new DatapathPTWIO)
})
val (rerocc, edge) = node.in(0)
val s_idle :: s_active :: s_rel_wait :: s_sfence :: s_unbusy :: Nil = Enum(5)
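    // Manager-side lifecycle (summary added for clarity, derived from the request/response
    // handling below):
    //   s_idle     -> s_active   when an mAcquire is accepted
    //   s_active   -> s_rel_wait on mRelease, or -> s_unbusy on mUnbusy
    //   s_rel_wait -> s_sfence   once no instructions are queued, the RoCC is not busy,
    //                            and the sRelResp beat fires
    //   s_sfence   -> s_idle     after the one-cycle TLB shootdown request
    //   s_unbusy   -> s_active   when the sUnbusyAck beat fires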
val numClients = edge.cParams.clients.map(_.nCfgs).sum
val client = Reg(UInt(log2Ceil(numClients).W))
val status = Reg(new MStatus)
val ptbr = Reg(new PTBR)
val state = RegInit(s_idle)
io.ptw.ptbr := ptbr
io.ptw.hgatp := 0.U.asTypeOf(new PTBR)
io.ptw.vsatp := 0.U.asTypeOf(new PTBR)
io.ptw.sfence.valid := state === s_sfence
io.ptw.sfence.bits.rs1 := false.B
io.ptw.sfence.bits.rs2 := false.B
io.ptw.sfence.bits.addr := 0.U
io.ptw.sfence.bits.asid := 0.U
io.ptw.sfence.bits.hv := false.B
io.ptw.sfence.bits.hg := false.B
io.ptw.status := status
io.ptw.hstatus := 0.U.asTypeOf(new HStatus)
io.ptw.gstatus := 0.U.asTypeOf(new MStatus)
io.ptw.pmp.foreach(_ := 0.U.asTypeOf(new PMP))
val rr_req = Queue(rerocc.req)
val (req_first, req_last, req_beat) = ReRoCCMsgFirstLast(rr_req, true)
val rr_resp = rerocc.resp
rr_req.ready := false.B
val inst_q = Module(new Queue(new RoCCCommand, ibufEntries))
val enq_inst = Reg(new RoCCCommand)
val next_enq_inst = WireInit(enq_inst)
inst_q.io.enq.valid := false.B
inst_q.io.enq.bits := next_enq_inst
inst_q.io.enq.bits.inst.opcode := roccOpcode
// 0 -> acquire ack
// 1 -> inst ack
// 2 -> writeback
// 3 -> rel
// 4 -> unbusyack
val resp_arb = Module(new ReRoCCMsgArbiter(edge.bundle, 5, false))
rr_resp <> resp_arb.io.out
resp_arb.io.in.foreach { i => i.valid := false.B }
val status_lower = Reg(UInt(64.W))
when (rr_req.valid) {
when (rr_req.bits.opcode === ReRoCCProtocol.mAcquire) {
rr_req.ready := resp_arb.io.in(0).ready
resp_arb.io.in(0).valid := true.B
when (state === s_idle && rr_req.fire) {
state := s_active
client := rr_req.bits.client_id
}
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mUStatus) {
rr_req.ready := !inst_q.io.deq.valid && !io.busy
when (!inst_q.io.deq.valid && !io.busy) {
when (req_first) { status_lower := rr_req.bits.data }
when (req_last) { status := Cat(rr_req.bits.data, status_lower).asTypeOf(new MStatus) }
}
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mUPtbr) {
rr_req.ready := !inst_q.io.deq.valid && !io.busy
when (!inst_q.io.deq.valid && !io.busy) { ptbr := rr_req.bits.data.asTypeOf(new PTBR) }
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mInst) {
assert(state === s_active && inst_q.io.enq.ready)
rr_req.ready := true.B
when (req_beat === 0.U) {
val inst = rr_req.bits.data.asTypeOf(new RoCCInstruction)
enq_inst.inst := inst
when (!inst.xs1 ) { enq_inst.rs1 := 0.U }
when (!inst.xs2 ) { enq_inst.rs2 := 0.U }
} .otherwise {
val enq_inst_rs1 = enq_inst.inst.xs1 && req_beat === 1.U
val enq_inst_rs2 = enq_inst.inst.xs2 && req_beat === Mux(enq_inst.inst.xs1, 2.U, 1.U)
when (enq_inst_rs1) { next_enq_inst.rs1 := rr_req.bits.data }
when (enq_inst_rs2) { next_enq_inst.rs2 := rr_req.bits.data }
enq_inst := next_enq_inst
}
when (req_last) {
inst_q.io.enq.valid := true.B
assert(inst_q.io.enq.ready)
}
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mRelease) {
rr_req.ready := true.B
state := s_rel_wait
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mUnbusy) {
rr_req.ready := true.B
state := s_unbusy
} .otherwise {
assert(false.B)
}
}
// acquire->ack/nack
resp_arb.io.in(0).bits.opcode := ReRoCCProtocol.sAcqResp
resp_arb.io.in(0).bits.client_id := rr_req.bits.client_id
resp_arb.io.in(0).bits.manager_id := io.manager_id
resp_arb.io.in(0).bits.data := state === s_idle
// insts -> (inst_q, inst_ack)
io.cmd.valid := inst_q.io.deq.valid && resp_arb.io.in(1).ready
io.cmd.bits := inst_q.io.deq.bits
inst_q.io.deq.ready := io.cmd.ready && resp_arb.io.in(1).ready
resp_arb.io.in(1).valid := inst_q.io.deq.valid && io.cmd.ready
resp_arb.io.in(1).bits.opcode := ReRoCCProtocol.sInstAck
resp_arb.io.in(1).bits.client_id := client
resp_arb.io.in(1).bits.manager_id := io.manager_id
resp_arb.io.in(1).bits.data := 0.U
// writebacks
val resp = Queue(io.resp)
val resp_rd = RegInit(false.B)
resp_arb.io.in(2).valid := resp.valid
resp_arb.io.in(2).bits.opcode := ReRoCCProtocol.sWrite
resp_arb.io.in(2).bits.client_id := client
resp_arb.io.in(2).bits.manager_id := io.manager_id
resp_arb.io.in(2).bits.data := Mux(resp_rd, resp.bits.rd, resp.bits.data)
when (resp_arb.io.in(2).fire) { resp_rd := !resp_rd }
resp.ready := resp_arb.io.in(2).ready && resp_rd
// release
resp_arb.io.in(3).valid := state === s_rel_wait && !io.busy && inst_q.io.count === 0.U
resp_arb.io.in(3).bits.opcode := ReRoCCProtocol.sRelResp
resp_arb.io.in(3).bits.client_id := client
resp_arb.io.in(3).bits.manager_id := io.manager_id
resp_arb.io.in(3).bits.data := 0.U
when (resp_arb.io.in(3).fire) {
state := s_sfence
}
when (state === s_sfence) { state := s_idle }
// unbusyack
resp_arb.io.in(4).valid := state === s_unbusy && !io.busy && inst_q.io.count === 0.U
resp_arb.io.in(4).bits.opcode := ReRoCCProtocol.sUnbusyAck
resp_arb.io.in(4).bits.client_id := client
resp_arb.io.in(4).bits.manager_id := io.manager_id
resp_arb.io.in(4).bits.data := 0.U
when (resp_arb.io.in(4).fire) { state := s_active }
}
}
class ReRoCCManagerTile()(implicit p: Parameters) extends LazyModule {
val reRoCCParams = p(TileKey).asInstanceOf[ReRoCCTileParams]
val reRoCCId = reRoCCParams.reroccId
def this(tileParams: ReRoCCTileParams, p: Parameters) = {
this()(p.alterMap(Map(
TileKey -> tileParams,
TileVisibilityNodeKey -> TLEphemeralNode()(ValName("rerocc_manager"))
)))
}
val reroccManagerIdSinkNode = BundleBridgeSink[UInt]()
val rocc = reRoCCParams.genRoCC.get(p)
require(rocc.opcodes.opcodes.size == 1)
val rerocc_manager = LazyModule(new ReRoCCManager(reRoCCParams, rocc.opcodes.opcodes.head))
val reRoCCNode = ReRoCCIdentityNode()
rerocc_manager.node := ReRoCCBuffer() := reRoCCNode
val tlNode = p(TileVisibilityNodeKey) // throttle before TL Node (merged ->
val tlXbar = TLXbar()
val stlNode = TLIdentityNode()
tlXbar :=* rocc.atlNode
if (reRoCCParams.mergeTLNodes) {
tlXbar :=* rocc.tlNode
} else {
tlNode :=* rocc.tlNode
}
tlNode :=* TLBuffer() :=* tlXbar
rocc.stlNode :*= stlNode
// minicache
val dcache = reRoCCParams.dcacheParams.map(_ => LazyModule(new MiniDCache(reRoCCId, SynchronousCrossing())(p)))
dcache.map(d => tlXbar := TLWidthWidget(reRoCCParams.rowBits/8) := d.node)
val hellammio: Option[HellaMMIO] = if (!dcache.isDefined) {
val h = LazyModule(new HellaMMIO(s"ReRoCC $reRoCCId MMIO"))
tlXbar := h.node
Some(h)
} else { None }
val ctrl = LazyModule(new ReRoCCManagerControl(reRoCCId, 8))
override lazy val module = new LazyModuleImp(this) {
val dcacheArb = Module(new HellaCacheArbiter(2)(p))
dcache.map(_.module.io.cpu).getOrElse(hellammio.get.module.io) <> dcacheArb.io.mem
val edge = dcache.map(_.node.edges.out(0)).getOrElse(hellammio.get.node.edges.out(0))
val ptw = Module(new PTW(1 + rocc.nPTWPorts)(edge, p))
if (dcache.isDefined) {
dcache.get.module.io.tlb_port := DontCare
dcache.get.module.io.tlb_port.req.valid := false.B
ptw.io.requestor(0) <> dcache.get.module.io.ptw
} else {
ptw.io.requestor(0) := DontCare
ptw.io.requestor(0).req.valid := false.B
}
dcacheArb.io.requestor(0) <> ptw.io.mem
val dcIF = Module(new SimpleHellaCacheIF)
dcIF.io.requestor <> rocc.module.io.mem
dcacheArb.io.requestor(1) <> dcIF.io.cache
for (i <- 0 until rocc.nPTWPorts) {
ptw.io.requestor(1+i) <> rocc.module.io.ptw(i)
}
rerocc_manager.module.io.manager_id := reroccManagerIdSinkNode.bundle
rocc.module.io.cmd <> rerocc_manager.module.io.cmd
rerocc_manager.module.io.resp <> rocc.module.io.resp
rerocc_manager.module.io.busy := rocc.module.io.busy
ptw.io.dpath <> rerocc_manager.module.io.ptw
rocc.module.io.fpu_req.ready := false.B
assert(!rocc.module.io.fpu_req.valid)
rocc.module.io.fpu_resp.valid := false.B
rocc.module.io.fpu_resp.bits := DontCare
rocc.module.io.exception := false.B
ctrl.module.io.mgr_busy := rerocc_manager.module.io.busy
ctrl.module.io.rocc_busy := rocc.module.io.busy
}
}
File LazyRoCC.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.experimental.IntParam
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.rocket.{
MStatus, HellaCacheIO, TLBPTWIO, CanHavePTW, CanHavePTWModule,
SimpleHellaCacheIF, M_XRD, PTE, PRV, M_SZ
}
import freechips.rocketchip.tilelink.{
TLNode, TLIdentityNode, TLClientNode, TLMasterParameters, TLMasterPortParameters
}
import freechips.rocketchip.util.InOrderArbiter
case object BuildRoCC extends Field[Seq[Parameters => LazyRoCC]](Nil)
class RoCCInstruction extends Bundle {
val funct = Bits(7.W)
val rs2 = Bits(5.W)
val rs1 = Bits(5.W)
val xd = Bool()
val xs1 = Bool()
val xs2 = Bool()
val rd = Bits(5.W)
val opcode = Bits(7.W)
}
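// Note added for clarity (not in the original file): packed MSB-to-LSB this bundle is
// funct[31:25] | rs2[24:20] | rs1[19:15] | xd[14] | xs1[13] | xs2[12] | rd[11:7] | opcode[6:0],
// i.e. the 32-bit RISC-V R-type custom-instruction encoding, which is why reinterpreting a
// raw data word with `asTypeOf(new RoCCInstruction)` (as the ReRoCC protocol does) recovers
// the fields directly.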
class RoCCCommand(implicit p: Parameters) extends CoreBundle()(p) {
val inst = new RoCCInstruction
val rs1 = Bits(xLen.W)
val rs2 = Bits(xLen.W)
val status = new MStatus
}
class RoCCResponse(implicit p: Parameters) extends CoreBundle()(p) {
val rd = Bits(5.W)
val data = Bits(xLen.W)
}
class RoCCCoreIO(val nRoCCCSRs: Int = 0)(implicit p: Parameters) extends CoreBundle()(p) {
val cmd = Flipped(Decoupled(new RoCCCommand))
val resp = Decoupled(new RoCCResponse)
val mem = new HellaCacheIO
val busy = Output(Bool())
val interrupt = Output(Bool())
val exception = Input(Bool())
val csrs = Flipped(Vec(nRoCCCSRs, new CustomCSRIO))
}
class RoCCIO(val nPTWPorts: Int, nRoCCCSRs: Int)(implicit p: Parameters) extends RoCCCoreIO(nRoCCCSRs)(p) {
val ptw = Vec(nPTWPorts, new TLBPTWIO)
val fpu_req = Decoupled(new FPInput)
val fpu_resp = Flipped(Decoupled(new FPResult))
}
/** Base classes for Diplomatic TL2 RoCC units **/
abstract class LazyRoCC(
val opcodes: OpcodeSet,
val nPTWPorts: Int = 0,
val usesFPU: Boolean = false,
val roccCSRs: Seq[CustomCSR] = Nil
)(implicit p: Parameters) extends LazyModule {
val module: LazyRoCCModuleImp
require(roccCSRs.map(_.id).toSet.size == roccCSRs.size)
val atlNode: TLNode = TLIdentityNode()
val tlNode: TLNode = TLIdentityNode()
val stlNode: TLNode = TLIdentityNode()
}
class LazyRoCCModuleImp(outer: LazyRoCC) extends LazyModuleImp(outer) {
val io = IO(new RoCCIO(outer.nPTWPorts, outer.roccCSRs.size))
io := DontCare
}
/** Mixins for including RoCC **/
trait HasLazyRoCC extends CanHavePTW { this: BaseTile =>
val roccs = p(BuildRoCC).map(_(p))
val roccCSRs = roccs.map(_.roccCSRs) // the set of custom CSRs requested by all roccs
require(roccCSRs.flatten.map(_.id).toSet.size == roccCSRs.flatten.size,
"LazyRoCC instantiations require overlapping CSRs")
roccs.map(_.atlNode).foreach { atl => tlMasterXbar.node :=* atl }
roccs.map(_.tlNode).foreach { tl => tlOtherMastersNode :=* tl }
roccs.map(_.stlNode).foreach { stl => stl :*= tlSlaveXbar.node }
nPTWPorts += roccs.map(_.nPTWPorts).sum
nDCachePorts += roccs.size
}
trait HasLazyRoCCModule extends CanHavePTWModule
with HasCoreParameters { this: RocketTileModuleImp =>
val (respArb, cmdRouter) = if(outer.roccs.nonEmpty) {
val respArb = Module(new RRArbiter(new RoCCResponse()(outer.p), outer.roccs.size))
val cmdRouter = Module(new RoccCommandRouter(outer.roccs.map(_.opcodes))(outer.p))
outer.roccs.zipWithIndex.foreach { case (rocc, i) =>
rocc.module.io.ptw ++=: ptwPorts
rocc.module.io.cmd <> cmdRouter.io.out(i)
val dcIF = Module(new SimpleHellaCacheIF()(outer.p))
dcIF.io.requestor <> rocc.module.io.mem
dcachePorts += dcIF.io.cache
respArb.io.in(i) <> Queue(rocc.module.io.resp)
}
(Some(respArb), Some(cmdRouter))
} else {
(None, None)
}
val roccCSRIOs = outer.roccs.map(_.module.io.csrs)
}
class AccumulatorExample(opcodes: OpcodeSet, val n: Int = 4)(implicit p: Parameters) extends LazyRoCC(opcodes) {
override lazy val module = new AccumulatorExampleModuleImp(this)
}
class AccumulatorExampleModuleImp(outer: AccumulatorExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer)
with HasCoreParameters {
val regfile = Mem(outer.n, UInt(xLen.W))
val busy = RegInit(VecInit(Seq.fill(outer.n){false.B}))
val cmd = Queue(io.cmd)
val funct = cmd.bits.inst.funct
val addr = cmd.bits.rs2(log2Up(outer.n)-1,0)
val doWrite = funct === 0.U
val doRead = funct === 1.U
val doLoad = funct === 2.U
val doAccum = funct === 3.U
val memRespTag = io.mem.resp.bits.tag(log2Up(outer.n)-1,0)
// datapath
val addend = cmd.bits.rs1
val accum = regfile(addr)
val wdata = Mux(doWrite, addend, accum + addend)
when (cmd.fire && (doWrite || doAccum)) {
regfile(addr) := wdata
}
when (io.mem.resp.valid) {
regfile(memRespTag) := io.mem.resp.bits.data
busy(memRespTag) := false.B
}
// control
when (io.mem.req.fire) {
busy(addr) := true.B
}
val doResp = cmd.bits.inst.xd
val stallReg = busy(addr)
val stallLoad = doLoad && !io.mem.req.ready
val stallResp = doResp && !io.resp.ready
cmd.ready := !stallReg && !stallLoad && !stallResp
// command resolved if no stalls AND not issuing a load that will need a request
// PROC RESPONSE INTERFACE
io.resp.valid := cmd.valid && doResp && !stallReg && !stallLoad
// valid response if valid command, need a response, and no stalls
io.resp.bits.rd := cmd.bits.inst.rd
// Must respond with the appropriate tag or undefined behavior
io.resp.bits.data := accum
// Semantics is to always send out prior accumulator register value
io.busy := cmd.valid || busy.reduce(_||_)
// Be busy when have pending memory requests or committed possibility of pending requests
io.interrupt := false.B
// Set this true to trigger an interrupt on the processor (please refer to supervisor documentation)
// MEMORY REQUEST INTERFACE
io.mem.req.valid := cmd.valid && doLoad && !stallReg && !stallResp
io.mem.req.bits.addr := addend
io.mem.req.bits.tag := addr
io.mem.req.bits.cmd := M_XRD // perform a load (M_XWR for stores)
io.mem.req.bits.size := log2Ceil(8).U
io.mem.req.bits.signed := false.B
io.mem.req.bits.data := 0.U // we're not performing any stores...
io.mem.req.bits.phys := false.B
io.mem.req.bits.dprv := cmd.bits.status.dprv
io.mem.req.bits.dv := cmd.bits.status.dv
io.mem.req.bits.no_resp := false.B
}
class TranslatorExample(opcodes: OpcodeSet)(implicit p: Parameters) extends LazyRoCC(opcodes, nPTWPorts = 1) {
override lazy val module = new TranslatorExampleModuleImp(this)
}
class TranslatorExampleModuleImp(outer: TranslatorExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer)
with HasCoreParameters {
val req_addr = Reg(UInt(coreMaxAddrBits.W))
val req_rd = Reg(chiselTypeOf(io.resp.bits.rd))
val req_offset = req_addr(pgIdxBits - 1, 0)
val req_vpn = req_addr(coreMaxAddrBits - 1, pgIdxBits)
val pte = Reg(new PTE)
val s_idle :: s_ptw_req :: s_ptw_resp :: s_resp :: Nil = Enum(4)
val state = RegInit(s_idle)
io.cmd.ready := (state === s_idle)
when (io.cmd.fire) {
req_rd := io.cmd.bits.inst.rd
req_addr := io.cmd.bits.rs1
state := s_ptw_req
}
private val ptw = io.ptw(0)
when (ptw.req.fire) { state := s_ptw_resp }
when (state === s_ptw_resp && ptw.resp.valid) {
pte := ptw.resp.bits.pte
state := s_resp
}
when (io.resp.fire) { state := s_idle }
ptw.req.valid := (state === s_ptw_req)
ptw.req.bits.valid := true.B
ptw.req.bits.bits.addr := req_vpn
io.resp.valid := (state === s_resp)
io.resp.bits.rd := req_rd
io.resp.bits.data := Mux(pte.leaf(), Cat(pte.ppn, req_offset), -1.S(xLen.W).asUInt)
io.busy := (state =/= s_idle)
io.interrupt := false.B
io.mem.req.valid := false.B
}
class CharacterCountExample(opcodes: OpcodeSet)(implicit p: Parameters) extends LazyRoCC(opcodes) {
override lazy val module = new CharacterCountExampleModuleImp(this)
override val atlNode = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1("CharacterCountRoCC")))))
}
class CharacterCountExampleModuleImp(outer: CharacterCountExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer)
with HasCoreParameters
with HasL1CacheParameters {
val cacheParams = tileParams.dcache.get
private val blockOffset = blockOffBits
private val beatOffset = log2Up(cacheDataBits/8)
val needle = Reg(UInt(8.W))
val addr = Reg(UInt(coreMaxAddrBits.W))
val count = Reg(UInt(xLen.W))
val resp_rd = Reg(chiselTypeOf(io.resp.bits.rd))
val addr_block = addr(coreMaxAddrBits - 1, blockOffset)
val offset = addr(blockOffset - 1, 0)
val next_addr = (addr_block + 1.U) << blockOffset.U
val s_idle :: s_acq :: s_gnt :: s_check :: s_resp :: Nil = Enum(5)
val state = RegInit(s_idle)
val (tl_out, edgesOut) = outer.atlNode.out(0)
val gnt = tl_out.d.bits
val recv_data = Reg(UInt(cacheDataBits.W))
val recv_beat = RegInit(0.U(log2Up(cacheDataBeats+1).W))
val data_bytes = VecInit(Seq.tabulate(cacheDataBits/8) { i => recv_data(8 * (i + 1) - 1, 8 * i) })
val zero_match = data_bytes.map(_ === 0.U)
val needle_match = data_bytes.map(_ === needle)
val first_zero = PriorityEncoder(zero_match)
val chars_found = PopCount(needle_match.zipWithIndex.map {
case (matches, i) =>
val idx = Cat(recv_beat - 1.U, i.U(beatOffset.W))
matches && idx >= offset && i.U <= first_zero
})
val zero_found = zero_match.reduce(_ || _)
val finished = Reg(Bool())
io.cmd.ready := (state === s_idle)
io.resp.valid := (state === s_resp)
io.resp.bits.rd := resp_rd
io.resp.bits.data := count
tl_out.a.valid := (state === s_acq)
tl_out.a.bits := edgesOut.Get(
fromSource = 0.U,
toAddress = addr_block << blockOffset,
lgSize = lgCacheBlockBytes.U)._2
tl_out.d.ready := (state === s_gnt)
when (io.cmd.fire) {
addr := io.cmd.bits.rs1
needle := io.cmd.bits.rs2
resp_rd := io.cmd.bits.inst.rd
count := 0.U
finished := false.B
state := s_acq
}
when (tl_out.a.fire) { state := s_gnt }
when (tl_out.d.fire) {
recv_beat := recv_beat + 1.U
recv_data := gnt.data
state := s_check
}
when (state === s_check) {
when (!finished) {
count := count + chars_found
}
when (zero_found) { finished := true.B }
when (recv_beat === cacheDataBeats.U) {
addr := next_addr
state := Mux(zero_found || finished, s_resp, s_acq)
recv_beat := 0.U
} .otherwise {
state := s_gnt
}
}
when (io.resp.fire) { state := s_idle }
io.busy := (state =/= s_idle)
io.interrupt := false.B
io.mem.req.valid := false.B
// Tie off unused channels
tl_out.b.ready := true.B
tl_out.c.valid := false.B
tl_out.e.valid := false.B
}
class BlackBoxExample(opcodes: OpcodeSet, blackBoxFile: String)(implicit p: Parameters)
extends LazyRoCC(opcodes) {
override lazy val module = new BlackBoxExampleModuleImp(this, blackBoxFile)
}
class BlackBoxExampleModuleImp(outer: BlackBoxExample, blackBoxFile: String)(implicit p: Parameters)
extends LazyRoCCModuleImp(outer)
with RequireSyncReset
with HasCoreParameters {
val blackbox = {
val roccIo = io
Module(
new BlackBox( Map( "xLen" -> IntParam(xLen),
"PRV_SZ" -> IntParam(PRV.SZ),
"coreMaxAddrBits" -> IntParam(coreMaxAddrBits),
"dcacheReqTagBits" -> IntParam(roccIo.mem.req.bits.tag.getWidth),
"M_SZ" -> IntParam(M_SZ),
"mem_req_bits_size_width" -> IntParam(roccIo.mem.req.bits.size.getWidth),
"coreDataBits" -> IntParam(coreDataBits),
"coreDataBytes" -> IntParam(coreDataBytes),
"paddrBits" -> IntParam(paddrBits),
"vaddrBitsExtended" -> IntParam(vaddrBitsExtended),
"FPConstants_RM_SZ" -> IntParam(FPConstants.RM_SZ),
"fLen" -> IntParam(fLen),
"FPConstants_FLAGS_SZ" -> IntParam(FPConstants.FLAGS_SZ)
) ) with HasBlackBoxResource {
val io = IO( new Bundle {
val clock = Input(Clock())
val reset = Input(Reset())
val rocc = chiselTypeOf(roccIo)
})
override def desiredName: String = blackBoxFile
addResource(s"/vsrc/$blackBoxFile.v")
}
)
}
blackbox.io.clock := clock
blackbox.io.reset := reset
blackbox.io.rocc.cmd <> io.cmd
io.resp <> blackbox.io.rocc.resp
io.mem <> blackbox.io.rocc.mem
io.busy := blackbox.io.rocc.busy
io.interrupt := blackbox.io.rocc.interrupt
blackbox.io.rocc.exception := io.exception
io.ptw <> blackbox.io.rocc.ptw
io.fpu_req <> blackbox.io.rocc.fpu_req
blackbox.io.rocc.fpu_resp <> io.fpu_resp
}
class OpcodeSet(val opcodes: Seq[UInt]) {
def |(set: OpcodeSet) =
new OpcodeSet(this.opcodes ++ set.opcodes)
def matches(oc: UInt) = opcodes.map(_ === oc).reduce(_ || _)
}
object OpcodeSet {
def custom0 = new OpcodeSet(Seq("b0001011".U))
def custom1 = new OpcodeSet(Seq("b0101011".U))
def custom2 = new OpcodeSet(Seq("b1011011".U))
def custom3 = new OpcodeSet(Seq("b1111011".U))
def all = custom0 | custom1 | custom2 | custom3
}
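// Illustrative sketch, not part of the original file: opcode sets are typically combined
// with `|` and handed to an accelerator through the BuildRoCC field above; RoccCommandRouter
// below then uses `matches` to steer commands. The config-fragment name is hypothetical;
// AccumulatorExample is defined earlier in this file.
class WithCustom0And2Accumulator extends Config((site, here, up) => {
  case BuildRoCC => Seq((p: Parameters) =>
    LazyModule(new AccumulatorExample(OpcodeSet.custom0 | OpcodeSet.custom2)(p)))
})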
class RoccCommandRouter(opcodes: Seq[OpcodeSet])(implicit p: Parameters)
extends CoreModule()(p) {
val io = IO(new Bundle {
val in = Flipped(Decoupled(new RoCCCommand))
val out = Vec(opcodes.size, Decoupled(new RoCCCommand))
val busy = Output(Bool())
})
val cmd = Queue(io.in)
val cmdReadys = io.out.zip(opcodes).map { case (out, opcode) =>
val me = opcode.matches(cmd.bits.inst.opcode)
out.valid := cmd.valid && me
out.bits := cmd.bits
out.ready && me
}
cmd.ready := cmdReadys.reduce(_ || _)
io.busy := cmd.valid
assert(PopCount(cmdReadys) <= 1.U,
"Custom opcode matched for more than one accelerator")
}
File Protocol.scala:
package rerocc.bus
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tile._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import rerocc.client.{ReRoCCClientParams}
import rerocc.manager.{ReRoCCManagerParams}
object ReRoCCProtocol {
val width = 3
val mAcquire = 0.U(width.W)
// beat0: data = inst
  // beat1: data = rs1 (if inst.xs1)
  // beat2: data = rs2 (if inst.xs2)
val mInst = 1.U(width.W)
// beat0: data = mstatus[63:0]
  // beat1: data = mstatus[127:64]
val mUStatus = 2.U(width.W)
// beat0: data = ptbr
val mUPtbr = 3.U(width.W)
val mRelease = 4.U(width.W)
val mUnbusy = 5.U(width.W)
// data
// data = acquired
val sAcqResp = 0.U(width.W)
// data = 0
val sInstAck = 1.U(width.W)
// beat0: data = data
// beat1: data = rd
val sWrite = 2.U(width.W)
val sRelResp = 3.U(width.W)
val sUnbusyAck = 4.U(width.W)
val MAX_BEATS = 3
}
class ReRoCCMsgBundle(val params: ReRoCCBundleParams) extends Bundle {
val opcode = UInt(ReRoCCProtocol.width.W)
val client_id = UInt(params.clientIdBits.W)
val manager_id = UInt(params.managerIdBits.W)
val data = UInt(64.W)
}
object ReRoCCMsgFirstLast {
def apply(m: DecoupledIO[ReRoCCMsgBundle], isReq: Boolean): (Bool, Bool, UInt) = {
val beat = RegInit(0.U(log2Ceil(ReRoCCProtocol.MAX_BEATS).W))
val max_beat = RegInit(0.U(log2Ceil(ReRoCCProtocol.MAX_BEATS).W))
val first = beat === 0.U
val last = Wire(Bool())
val inst = m.bits.data.asTypeOf(new RoCCInstruction)
when (m.fire && first) {
max_beat := 0.U
if (isReq) {
when (m.bits.opcode === ReRoCCProtocol.mInst) {
max_beat := inst.xs1 +& inst.xs2
} .elsewhen (m.bits.opcode === ReRoCCProtocol.mUStatus) {
max_beat := 1.U
}
} else {
when (m.bits.opcode === ReRoCCProtocol.sWrite) {
max_beat := 1.U
}
}
}
last := true.B
if (isReq) {
when (m.bits.opcode === ReRoCCProtocol.mUStatus) {
last := beat === max_beat && !first
} .elsewhen (m.bits.opcode === ReRoCCProtocol.mInst) {
last := Mux(first, !inst.xs1 && !inst.xs2, beat === max_beat)
}
} else {
when (m.bits.opcode === ReRoCCProtocol.sWrite) {
last := beat === max_beat && !first
}
}
when (m.fire) { beat := beat + 1.U }
when (m.fire && last) {
max_beat := 0.U
beat := 0.U
}
(first, last, beat)
}
}
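// Worked example (added for clarity, not in the original file), for the request
// direction (isReq = true):
//   mInst with xs1 = xs2 = 1: beat 0 carries the RoCCInstruction and sets max_beat = 2;
//     beats 1 and 2 carry rs1 and rs2; `last` asserts on beat 2.
//   mInst with xs1 = xs2 = 0: the single beat is both first and last.
//   mUStatus: always two beats (mstatus[63:0] then the upper half); `last` asserts on beat 1.
// The manager side in Manager.scala relies on exactly this sequencing when it latches
// status_lower and assembles enq_inst.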
class ReRoCCBundle(val params: ReRoCCBundleParams) extends Bundle {
val req = Decoupled(new ReRoCCMsgBundle(params))
val resp = Flipped(Decoupled(new ReRoCCMsgBundle(params)))
}
case class EmptyParams()
object ReRoCCImp extends SimpleNodeImp[ReRoCCClientPortParams, ReRoCCManagerPortParams, ReRoCCEdgeParams, ReRoCCBundle] {
def edge(pd: ReRoCCClientPortParams, pu: ReRoCCManagerPortParams, p: Parameters, sourceInfo: SourceInfo) = {
ReRoCCEdgeParams(pu, pd)
}
def bundle(e: ReRoCCEdgeParams) = new ReRoCCBundle(e.bundle)
def render(ei: ReRoCCEdgeParams) = RenderedEdge(colour = "#000000" /* black */)
}
case class ReRoCCClientNode(clientParams: ReRoCCClientParams)(implicit valName: ValName) extends SourceNode(ReRoCCImp)(Seq(ReRoCCClientPortParams(Seq(clientParams))))
case class ReRoCCManagerNode(managerParams: ReRoCCManagerParams)(implicit valName: ValName) extends SinkNode(ReRoCCImp)(Seq(ReRoCCManagerPortParams(Seq(managerParams))))
class ReRoCCBuffer(b: BufferParams = BufferParams.default)(implicit p: Parameters) extends LazyModule {
val node = new AdapterNode(ReRoCCImp)({s => s}, {s => s})
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, _), (out, _)) =>
out.req <> b(in.req)
in.resp <> b(out.resp)
}
}
}
object ReRoCCBuffer {
def apply(b: BufferParams = BufferParams.default)(implicit p: Parameters) = {
val rerocc_buffer = LazyModule(new ReRoCCBuffer(b)(p))
rerocc_buffer.node
}
}
case class ReRoCCIdentityNode()(implicit valName: ValName) extends IdentityNode(ReRoCCImp)()
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossbile bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
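      // Concrete example (added for clarity): with a 4-byte master feeding an 8-byte
      // manager, keepBits = 3 and dropBits = 2, so exactly one address bit (address(2))
      // is remembered per source and later selects which 4-byte half of each 8-byte
      // D beat belongs to that request.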
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
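// Illustrative sketch (not part of the original source): a minimal LazyModule whose
// implementation is a LazyRawModuleImp that drives the shared childClock/childReset from
// its own explicit IO and opts in to providing the implicit clock to its lazy children.
// The class and IO names below are hypothetical.
class ExampleRawClockDomain(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    val io_clock = IO(Input(Clock()))
    val io_reset = IO(Input(Bool()))
    override def provideImplicitClockToLazyChildren = true
    // last connect wins: these override the disabled defaults assigned in LazyRawModuleImp
    childClock := io_clock
    childReset := io_reset
  }
}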
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); it may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, s"Id range end ($end) cannot be less than start ($start).")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
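// Illustrative sketch (not part of the original source): IdRange is half-open, so
// adjacent ranges share no ids and `shift` translates the whole range.
object IdRangeExample {
  val a = IdRange(0, 4)                          // ids 0, 1, 2, 3
  val b = IdRange(4, 8)                          // ids 4, 5, 6, 7
  require(!a.overlaps(b))                        // [0,4) and [4,8) are disjoint
  require(a.contains(3) && !a.contains(4))       // end is exclusive
  require(a.shift(4) == b && a.size == 4)
  require(IdRange.overlaps(Seq(a, b)).isEmpty)   // no overlapping pair in the sequence
}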
// A potentially empty inclusive range of power-of-2 transfer sizes [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
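// Illustrative sketch (not part of the original source): TransferSizes is an inclusive
// range of power-of-two sizes in bytes; (0, 0) means "no transfers of this kind".
object TransferSizesExample {
  val small = TransferSizes(1, 8)
  val big   = TransferSizes(4, 64)
  require(small.contains(4) && !small.contains(16))
  require(small.intersect(big) == TransferSizes(4, 8))    // sizes legal for both
  require(small.mincover(big) == TransferSizes(1, 64))    // smallest range covering both
  require(TransferSizes.none.none && !TransferSizes(8).none)
}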
// AddressSets specify the address space managed by the manager
// base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
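// Illustrative sketch (not part of the original source): the mask marks the address bits the
// device ignores, so base=0x1000, mask=0xf0f matches 0x1000-0x100f, 0x1100-0x110f, ...
object AddressSetExample {
  val strided = AddressSet(0x1000, 0xf0f)
  require(strided.contains(BigInt(0x110c)) && !strided.contains(BigInt(0x1010)))
  require(strided.alignment == 0x10 && !strided.contiguous)
  // misaligned() decomposes an arbitrary [base, base+size) region into legal AddressSets
  require(AddressSet.misaligned(0x300, 0x200) ==
    Seq(AddressSet(0x300, 0xff), AddressSet(0x400, 0xff)))
  // unify() merges sets that differ only in a single mask bit
  require(AddressSet.unify(Seq(AddressSet(0x0, 0xff), AddressSet(0x100, 0xff))) ==
    Seq(AddressSet(0x0, 0x1ff)))
}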
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
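// Illustrative sketch (not part of the original source): BufferParams describes an optional
// Queue stage; depth 0 is a pass-through, and flow/pipe have their chisel3.util.Queue meaning.
object BufferParamsExample {
  require(!BufferParams.none.isDefined && BufferParams.none.latency == 0)
  require(BufferParams.default.latency == 1)   // depth-2 queue, neither flow nor pipe
  require(BufferParams.flow.latency == 0)      // depth-1 flow-through queue
  val fromInt: BufferParams = 3                // Int lifts to BufferParams(3, false, false)
  require(fromInt.isDefined)
}
// Inside a module, `someBufferParams(someDecoupled)` wraps the channel in a Queue of the
// configured depth (or returns it unchanged when the depth is 0).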
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
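// Illustrative sketch (not part of the original source): TriStateValue only overrides a
// default when `set` is true; the implicit conversion from Boolean always marks it as set.
object TriStateValueExample {
  require(TriStateValue.unset.update(true))          // unset keeps the original value
  require(!TriStateValue(false, true).update(true))  // an explicitly-set value wins
  val fromBool: TriStateValue = true                 // via the implicit conversion, set = true
  require(fromBool.update(false))
}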
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
 * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
 *   flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
 *   `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
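// Illustrative sketch (not part of the original source): a Dangle records which two node
// ports an auto-punched IO belongs to; dataOpt is only filled once the hardware exists.
object DangleExample {
  val d = Dangle(
    source = HalfEdge(serial = 0, index = 0),
    sink = HalfEdge(serial = 1, index = 0),
    flipped = false, // corresponds to a `danglesOut` entry
    name = "example_out",
    dataOpt = None)
  require(HalfEdge(0, 0) < HalfEdge(0, 1)) // HalfEdges order lexicographically by (serial, index)
}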
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
 * @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
 *   [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
 *   parameters.
 * @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
 * @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
 *   specified for a sink according to protocol.
 * @tparam BI
 *   Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
 * @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
 * @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
 *   Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
 * @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
 *   specified for a source according to protocol.
 * @tparam BO
 *   Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
  /** Debug info of node bindings. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
  /** Debug info of connected ports. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
  /** Debug info of parameter propagation. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
    *   - [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding operator).
    *   - [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort (binding operator).
    *   - [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= bar`
    *   - [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
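  // Worked example (not in the original source): if the bindings of a node resolve to
  // edge counts Seq(1, 2, 1), then scanLeft(0)(_ + _) gives Seq(0, 1, 3, 4) and
  // init.zip(tail) yields the per-binding edge ranges Seq((0,1), (1,3), (3,4)).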
  /** Sequence of outward ports (each entry points at the inward side of another node).
    *
    * This should be called after all star bindings are resolved.
    *
    * Each element is: `j` Port index of this binding in the connected [[InwardNode]] on the other end of the binding.
    * `n` Instance of the connected inward node. `p` View of [[Parameters]] where this connection was made. `s` Source
    * info where this connection was made in the source code.
    */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports (each entry points at the outward side of another node).
*
* This should be called after all star bindings are resolved.
*
    * `j` Port index of this binding in the connected [[OutwardNode]] on the other end of the binding. `n` Instance of
    * the connected outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: unconnected forwarded diplomatic signals are tied to DontCare for compatibility;
    // in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: unconnected forwarded diplomatic signals are tied to DontCare for compatibility;
    // in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
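// Illustrative sketch (not part of the original source): inside a LazyModuleImp, `node.in`
// and `node.out` pair each negotiated bundle with its edge parameters, which is the intended
// way to consume the result of negotiation. `node` below is a hypothetical sink node owned
// by the surrounding LazyModule, and `tieOff` is a hypothetical helper.
//   lazy val module = new LazyModuleImp(this) {
//     node.in.foreach { case (bundle, edge) =>
//       // `edge` carries the negotiated parameters; `bundle` is the hardware interface
//       tieOff(bundle, edge)
//     }
//   }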
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
      // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
      // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
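  // Worked example (not in the original source): with beatBytes = 8 and a 32-byte data
  // burst (lgSize = 5), numBeats1 = 3, so across four fired beats the counter steps
  // 0 -> 3 -> 2 -> 1, `first` is true only on the first beat, `last` and `done` only on
  // the fourth, and `count` takes the values 0, 1, 2, 3.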
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
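// Illustrative sketch (not part of rocket-chip): a minimal client LazyModule that
// exercises the TLEdgeOut helpers above by issuing a single 4-byte Get and sinking
// the AccessAckData reply. All names here are hypothetical, and the snippet assumes
// the usual diplomacy imports (LazyModule, LazyModuleImp, Parameters) are in scope.
class ExampleGetClient(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1("example")))))
  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)
    // Get returns (legal, bits): `legal` is false if no visible manager supports the access.
    // A real client would also qualify valid with its own request logic.
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 2.U)
    tl.a.valid := legal
    tl.a.bits := get
    tl.d.ready := true.B // sink AccessAck / AccessAckData responses
    // This client never uses the B/C/E channels.
    tl.b.ready := true.B
    tl.c.valid := false.B
    tl.e.valid := false.B
  }
}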
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
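// Illustrative sketch (not part of rocket-chip): a minimal manager LazyModule that
// uses the TLEdgeIn helpers above to answer every Get with a constant AccessAckData.
// All names are hypothetical and the snippet assumes the usual diplomacy imports
// (LazyModule, LazyModuleImp, AddressSet, TransferSizes, Parameters) are in scope.
class ExampleEchoManager(implicit p: Parameters) extends LazyModule {
  val node = TLManagerNode(Seq(TLSlavePortParameters.v1(
    Seq(TLSlaveParameters.v1(
      address     = Seq(AddressSet(0x0, 0xfff)),
      supportsGet = TransferSizes(1, 8))),
    beatBytes = 8)))
  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.in(0)
    // Only Gets can arrive (supportsGet is the sole declared capability), so every
    // request is answered with a single-beat AccessAckData carrying a fixed pattern.
    tl.a.ready := tl.d.ready
    tl.d.valid := tl.a.valid
    tl.d.bits  := edge.AccessAck(tl.a.bits, "hdeadbeef".U)
    // This manager never uses the B/C/E channels.
    tl.b.valid := false.B
    tl.c.ready := true.B
    tl.e.ready := true.B
  }
}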
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.BundleField
// Trades off slave port proximity against routing resource cost
object ForceFanout
{
def apply[T](
a: TriStateValue = TriStateValue.unset,
b: TriStateValue = TriStateValue.unset,
c: TriStateValue = TriStateValue.unset,
d: TriStateValue = TriStateValue.unset,
e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
{
body(p.alterPartial {
case ForceFanoutKey => p(ForceFanoutKey) match {
case ForceFanoutParams(pa, pb, pc, pd, pe) =>
ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe))
}
})
}
}
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
TLXbar.circuit(policy, node.in, node.out)
}
}
object TLXbar
{
def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))
def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))
def assignRanges(sizes: Seq[Int]) = {
val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
val ranges = (tuples zip starts) map { case ((sz, i), st) =>
(if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
}
    ranges.sortBy(_._2).map(_._1) // Restore original order
}
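  // Worked example (illustrative): assignRanges(Seq(3, 0, 1))
  //   pow2Sizes            = Seq(4, 0, 1)            (each size rounded up to a power of two)
  //   sorted (size, index) = Seq((0,1), (1,2), (4,0))
  //   suffix-sum starts    = Seq(5, 4, 0), so the largest port is placed at 0
  //   restored order       = Seq(IdRange(0,4), IdRange(0,0), IdRange(4,5))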
def relabeler() = {
var idFactory = 0
() => {
val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
(x: Int) => {
if (fifoMap.contains(x)) fifoMap(x) else {
val out = idFactory
idFactory = idFactory + 1
fifoMap += (x -> out)
out
}
}
}
}
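  // Worked example (illustrative): a single relabeler() shares one id counter across the
  // per-port mappers it creates, so if two ports both carry fifoId 0, the first port's
  // mapper yields 0 -> 0 and the second port's mapper yields 0 -> 1, keeping FIFO domains
  // from different ports distinct in the combined manager view.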
  def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]): Unit = {
val (io_in, edgesIn) = seqIn.unzip
val (io_out, edgesOut) = seqOut.unzip
// Not every master need connect to every slave on every channel; determine which connections are necessary
val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
cp.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)}}}}
}.toVector}.toVector
val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
}.toVector}.toVector
val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector}.toVector
val connectAIO = reachableIO
val connectBIO = probeIO
val connectCIO = releaseIO
val connectDIO = reachableIO
val connectEIO = releaseIO
def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
val connectAOI = transpose(connectAIO)
val connectBOI = transpose(connectBIO)
val connectCOI = transpose(connectCIO)
val connectDOI = transpose(connectDIO)
val connectEOI = transpose(connectEIO)
// Grab the port ID mapping
val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
// We need an intermediate size of bundle with the widest possible identifiers
val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
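    // Example (illustrative): trim(source, 4) keeps source(1,0), while trim(source, 1)
    // returns 0.U so a single-source port never generates a zero-width bit extract.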
// Transform input bundle sources (sinks use global namespace on both sides)
val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
for (i <- 0 until in.size) {
val r = inputIdRanges(i)
if (connectAIO(i).exists(x=>x)) {
in(i).a.bits.user := DontCare
in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
} else {
in(i).a := DontCare
io_in(i).a := DontCare
in(i).a.valid := false.B
io_in(i).a.ready := true.B
}
if (connectBIO(i).exists(x=>x)) {
io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
} else {
in(i).b := DontCare
io_in(i).b := DontCare
in(i).b.ready := true.B
io_in(i).b.valid := false.B
}
if (connectCIO(i).exists(x=>x)) {
in(i).c.bits.user := DontCare
in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
} else {
in(i).c := DontCare
io_in(i).c := DontCare
in(i).c.valid := false.B
io_in(i).c.ready := true.B
}
if (connectDIO(i).exists(x=>x)) {
io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
} else {
in(i).d := DontCare
io_in(i).d := DontCare
in(i).d.ready := true.B
io_in(i).d.valid := false.B
}
if (connectEIO(i).exists(x=>x)) {
in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
} else {
in(i).e := DontCare
io_in(i).e := DontCare
in(i).e.valid := false.B
io_in(i).e.ready := true.B
}
}
// Transform output bundle sinks (sources use global namespace on both sides)
val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
for (o <- 0 until out.size) {
val r = outputIdRanges(o)
if (connectAOI(o).exists(x=>x)) {
out(o).a.bits.user := DontCare
io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
} else {
out(o).a := DontCare
io_out(o).a := DontCare
out(o).a.ready := true.B
io_out(o).a.valid := false.B
}
if (connectBOI(o).exists(x=>x)) {
out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
} else {
out(o).b := DontCare
io_out(o).b := DontCare
out(o).b.valid := false.B
io_out(o).b.ready := true.B
}
if (connectCOI(o).exists(x=>x)) {
out(o).c.bits.user := DontCare
io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
} else {
out(o).c := DontCare
io_out(o).c := DontCare
out(o).c.ready := true.B
io_out(o).c.valid := false.B
}
if (connectDOI(o).exists(x=>x)) {
out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
} else {
out(o).d := DontCare
io_out(o).d := DontCare
out(o).d.valid := false.B
io_out(o).d.ready := true.B
}
if (connectEOI(o).exists(x=>x)) {
io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
} else {
out(o).e := DontCare
io_out(o).e := DontCare
out(o).e.ready := true.B
io_out(o).e.valid := false.B
}
}
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
// Based on input=>output connectivity, create per-input minimal address decode circuits
val requiredAC = (connectAIO ++ connectCIO).distinct
val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
// Print the address mapping
if (false) {
println("Xbar mapping:")
route_addrs.foreach { p =>
print(" ")
p.foreach { a => print(s" ${a}") }
println("")
}
println("--")
}
(connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
}.toMap
// Print the ID mapping
if (false) {
println(s"XBar mapping:")
(edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
}
println("")
}
val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }
val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }
// Fanout the input sources to the output sinks
val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })
// Arbitrate amongst the sources
for (o <- 0 until out.size) {
TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
}
for (i <- 0 until in.size) {
TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
}
}
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val xbar = LazyModule(new TLXbar(policy, nameSuffix))
xbar.node
}
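  // Usage sketch (illustrative): apply() lets a crossbar be wired inline without naming
  // the LazyModule, e.g. `ram.node := TLFragmenter(4, 256) := TLXbar() := fuzz.node`.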
// Replicate an input port to each output port
def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
for (i <- 0 until select.size) {
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
}
input.ready := Mux1H(select, filtered.map(_.ready))
filtered
}
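  // Note (illustrative): fanout replicates one DecoupledIO across the selected outputs;
  // the address decode guarantees at most one select bit is high per beat, so Mux1H can
  // safely return the chosen output's ready to the single input.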
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.last.module.io.finished
}
}
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
module ReRoCCManagerTile_2( // @[Manager.scala:237:34]
input clock, // @[Manager.scala:237:34]
input reset, // @[Manager.scala:237:34]
output auto_ctrl_ctrl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_ctrl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_ctrl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_ctrl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_ctrl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_ctrl_ctrl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_ctrl_ctrl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_ctrl_ctrl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_ctrl_ctrl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_ctrl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_ctrl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_ctrl_ctrl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_ctrl_ctrl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_ctrl_ctrl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_ctrl_ctrl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_ctrl_ctrl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_buffer_out_b_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_buffer_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_buffer_out_b_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_out_b_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_buffer_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_buffer_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_e_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_re_ro_cc_in_req_ready, // @[LazyModuleImp.scala:107:25]
input auto_re_ro_cc_in_req_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_re_ro_cc_in_req_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_re_ro_cc_in_req_bits_client_id, // @[LazyModuleImp.scala:107:25]
input auto_re_ro_cc_in_req_bits_manager_id, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_re_ro_cc_in_req_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_re_ro_cc_in_resp_ready, // @[LazyModuleImp.scala:107:25]
output auto_re_ro_cc_in_resp_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_re_ro_cc_in_resp_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_re_ro_cc_in_resp_bits_client_id, // @[LazyModuleImp.scala:107:25]
output auto_re_ro_cc_in_resp_bits_manager_id, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_re_ro_cc_in_resp_bits_data, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_rerocc_manager_id_sink_in // @[LazyModuleImp.scala:107:25]
);
wire reRoCCNodeOut_resp_valid; // @[MixedNode.scala:542:17]
wire [63:0] reRoCCNodeOut_resp_bits_data; // @[MixedNode.scala:542:17]
wire reRoCCNodeOut_resp_bits_manager_id; // @[MixedNode.scala:542:17]
wire [3:0] reRoCCNodeOut_resp_bits_client_id; // @[MixedNode.scala:542:17]
wire [2:0] reRoCCNodeOut_resp_bits_opcode; // @[MixedNode.scala:542:17]
wire reRoCCNodeOut_req_ready; // @[MixedNode.scala:542:17]
wire widget_auto_anon_in_e_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_e_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_c_valid; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_c_bits_data; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_c_bits_address; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_c_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_c_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_c_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_c_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_b_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] xbar_out_0_e_bits_sink; // @[Xbar.scala:216:19]
wire [2:0] xbar_out_0_d_bits_sink; // @[Xbar.scala:216:19]
wire xbar_in_0_d_bits_source; // @[Xbar.scala:159:18]
wire xbar_in_0_c_bits_source; // @[Xbar.scala:159:18]
wire xbar_in_0_b_bits_source; // @[Xbar.scala:159:18]
wire xbar_in_0_a_bits_source; // @[Xbar.scala:159:18]
wire xbar_auto_anon_out_e_ready; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_bits_corrupt; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_out_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_bits_denied; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_d_bits_sink; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_bits_source; // @[Xbar.scala:74:9]
wire [3:0] xbar_auto_anon_out_d_bits_size; // @[Xbar.scala:74:9]
wire [1:0] xbar_auto_anon_out_d_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_c_ready; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_b_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_b_bits_corrupt; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_out_b_bits_data; // @[Xbar.scala:74:9]
wire [7:0] xbar_auto_anon_out_b_bits_mask; // @[Xbar.scala:74:9]
wire [31:0] xbar_auto_anon_out_b_bits_address; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_b_bits_source; // @[Xbar.scala:74:9]
wire [3:0] xbar_auto_anon_out_b_bits_size; // @[Xbar.scala:74:9]
wire [1:0] xbar_auto_anon_out_b_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_b_bits_opcode; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_a_ready; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_e_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_e_ready; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_e_bits_sink; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_ready; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_bits_corrupt; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_in_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_bits_denied; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_d_bits_sink; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_bits_source; // @[Xbar.scala:74:9]
wire [3:0] xbar_auto_anon_in_d_bits_size; // @[Xbar.scala:74:9]
wire [1:0] xbar_auto_anon_in_d_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_d_bits_opcode; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_c_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_c_ready; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_in_c_bits_data; // @[Xbar.scala:74:9]
wire [31:0] xbar_auto_anon_in_c_bits_address; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_c_bits_source; // @[Xbar.scala:74:9]
wire [3:0] xbar_auto_anon_in_c_bits_size; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_c_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_c_bits_opcode; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_b_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_b_ready; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_b_bits_corrupt; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_in_b_bits_data; // @[Xbar.scala:74:9]
wire [7:0] xbar_auto_anon_in_b_bits_mask; // @[Xbar.scala:74:9]
wire [31:0] xbar_auto_anon_in_b_bits_address; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_b_bits_source; // @[Xbar.scala:74:9]
wire [3:0] xbar_auto_anon_in_b_bits_size; // @[Xbar.scala:74:9]
wire [1:0] xbar_auto_anon_in_b_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_b_bits_opcode; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_a_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_a_ready; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_in_a_bits_data; // @[Xbar.scala:74:9]
wire [7:0] xbar_auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9]
wire [31:0] xbar_auto_anon_in_a_bits_address; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_a_bits_source; // @[Xbar.scala:74:9]
wire [3:0] xbar_auto_anon_in_a_bits_size; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9]
wire _dcIF_io_cache_req_valid; // @[Manager.scala:255:22]
wire [39:0] _dcIF_io_cache_req_bits_addr; // @[Manager.scala:255:22]
wire [7:0] _dcIF_io_cache_req_bits_tag; // @[Manager.scala:255:22]
wire [1:0] _dcIF_io_cache_req_bits_dprv; // @[Manager.scala:255:22]
wire _dcIF_io_cache_req_bits_dv; // @[Manager.scala:255:22]
wire [63:0] _dcIF_io_cache_s1_data_data; // @[Manager.scala:255:22]
wire [7:0] _dcIF_io_cache_s1_data_mask; // @[Manager.scala:255:22]
wire _ptw_io_requestor_0_req_ready; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_valid; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_ae_ptw; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_ae_final; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pf; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_gf; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_hr; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_hw; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_hx; // @[Manager.scala:243:21]
wire [9:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_future; // @[Manager.scala:243:21]
wire [43:0] _ptw_io_requestor_0_resp_bits_pte_ppn; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_software; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_d; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_a; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_g; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_u; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_x; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_w; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_r; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_v; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_resp_bits_level; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_homogeneous; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_gpa_valid; // @[Manager.scala:243:21]
wire [38:0] _ptw_io_requestor_0_resp_bits_gpa_bits; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_gpa_is_pte; // @[Manager.scala:243:21]
wire [3:0] _ptw_io_requestor_0_ptbr_mode; // @[Manager.scala:243:21]
wire [15:0] _ptw_io_requestor_0_ptbr_asid; // @[Manager.scala:243:21]
wire [43:0] _ptw_io_requestor_0_ptbr_ppn; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_debug; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_cease; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_wfi; // @[Manager.scala:243:21]
wire [31:0] _ptw_io_requestor_0_status_isa; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_dprv; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_dv; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_prv; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_v; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sd; // @[Manager.scala:243:21]
wire [22:0] _ptw_io_requestor_0_status_zero2; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mpv; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_gva; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mbe; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sbe; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_sxl; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_uxl; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sd_rv32; // @[Manager.scala:243:21]
wire [7:0] _ptw_io_requestor_0_status_zero1; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_tsr; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_tw; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_tvm; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mxr; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sum; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mprv; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_xs; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_fs; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_mpp; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_vs; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_spp; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mpie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_ube; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_spie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_upie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_hie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_uie; // @[Manager.scala:243:21]
wire _ptw_io_mem_req_valid; // @[Manager.scala:243:21]
wire [39:0] _ptw_io_mem_req_bits_addr; // @[Manager.scala:243:21]
wire _ptw_io_mem_req_bits_dv; // @[Manager.scala:243:21]
wire _ptw_io_mem_s1_kill; // @[Manager.scala:243:21]
wire _ptw_io_dpath_perf_pte_miss; // @[Manager.scala:243:21]
wire _ptw_io_dpath_clock_enabled; // @[Manager.scala:243:21]
wire _dcacheArb_io_requestor_0_req_ready; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_nack; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_nack_cause_raw; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_uncached; // @[Manager.scala:238:27]
wire [31:0] _dcacheArb_io_requestor_0_s2_paddr; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_valid; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_requestor_0_resp_bits_addr; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_requestor_0_resp_bits_tag; // @[Manager.scala:238:27]
wire [4:0] _dcacheArb_io_requestor_0_resp_bits_cmd; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_requestor_0_resp_bits_size; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_bits_signed; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_requestor_0_resp_bits_dprv; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_bits_dv; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_requestor_0_resp_bits_mask; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_bits_replay; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_bits_has_data; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_word_bypass; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_raw; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_store_data; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_replay_next; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_ma_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_ma_st; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_pf_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_pf_st; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_ae_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_ae_st; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_requestor_0_s2_gpa; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_ordered; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_store_pending; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_acquire; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_release; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_grant; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_tlbMiss; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_blocked; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_req_ready; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_nack; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_nack_cause_raw; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_uncached; // @[Manager.scala:238:27]
wire [31:0] _dcacheArb_io_requestor_1_s2_paddr; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_valid; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_requestor_1_resp_bits_addr; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_requestor_1_resp_bits_tag; // @[Manager.scala:238:27]
wire [4:0] _dcacheArb_io_requestor_1_resp_bits_cmd; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_requestor_1_resp_bits_size; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_bits_signed; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_requestor_1_resp_bits_dprv; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_bits_dv; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_requestor_1_resp_bits_mask; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_bits_replay; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_bits_has_data; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_word_bypass; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_raw; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_store_data; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_replay_next; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_ma_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_ma_st; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_pf_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_pf_st; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_ae_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_ae_st; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_requestor_1_s2_gpa; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_ordered; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_store_pending; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_acquire; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_release; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_grant; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_tlbMiss; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_blocked; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore; // @[Manager.scala:238:27]
wire _dcacheArb_io_mem_req_valid; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_mem_req_bits_addr; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_mem_req_bits_tag; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_mem_req_bits_dprv; // @[Manager.scala:238:27]
wire _dcacheArb_io_mem_req_bits_dv; // @[Manager.scala:238:27]
wire _dcacheArb_io_mem_req_bits_phys; // @[Manager.scala:238:27]
wire _dcacheArb_io_mem_s1_kill; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_mem_s1_data_data; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_mem_s1_data_mask; // @[Manager.scala:238:27]
wire _dcache_io_cpu_req_ready; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_nack; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_nack_cause_raw; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_uncached; // @[Manager.scala:226:61]
wire [31:0] _dcache_io_cpu_s2_paddr; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_valid; // @[Manager.scala:226:61]
wire [39:0] _dcache_io_cpu_resp_bits_addr; // @[Manager.scala:226:61]
wire [7:0] _dcache_io_cpu_resp_bits_tag; // @[Manager.scala:226:61]
wire [4:0] _dcache_io_cpu_resp_bits_cmd; // @[Manager.scala:226:61]
wire [1:0] _dcache_io_cpu_resp_bits_size; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_bits_signed; // @[Manager.scala:226:61]
wire [1:0] _dcache_io_cpu_resp_bits_dprv; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_bits_dv; // @[Manager.scala:226:61]
wire [63:0] _dcache_io_cpu_resp_bits_data; // @[Manager.scala:226:61]
wire [7:0] _dcache_io_cpu_resp_bits_mask; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_bits_replay; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_bits_has_data; // @[Manager.scala:226:61]
wire [63:0] _dcache_io_cpu_resp_bits_data_word_bypass; // @[Manager.scala:226:61]
wire [63:0] _dcache_io_cpu_resp_bits_data_raw; // @[Manager.scala:226:61]
wire [63:0] _dcache_io_cpu_resp_bits_store_data; // @[Manager.scala:226:61]
wire _dcache_io_cpu_replay_next; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_ma_ld; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_ma_st; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_pf_ld; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_pf_st; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_ae_ld; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_ae_st; // @[Manager.scala:226:61]
wire [39:0] _dcache_io_cpu_s2_gpa; // @[Manager.scala:226:61]
wire _dcache_io_cpu_ordered; // @[Manager.scala:226:61]
wire _dcache_io_cpu_store_pending; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_acquire; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_release; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_grant; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_tlbMiss; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_blocked; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_canAcceptStoreThenLoad; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_canAcceptStoreThenRMW; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_canAcceptLoadThenLoad; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_storeBufferEmptyAfterLoad; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_storeBufferEmptyAfterStore; // @[Manager.scala:226:61]
wire _dcache_io_ptw_req_valid; // @[Manager.scala:226:61]
wire [26:0] _dcache_io_ptw_req_bits_bits_addr; // @[Manager.scala:226:61]
wire _dcache_io_ptw_req_bits_bits_need_gpa; // @[Manager.scala:226:61]
wire _rerocc_buffer_auto_out_req_valid; // @[Protocol.scala:134:35]
wire [2:0] _rerocc_buffer_auto_out_req_bits_opcode; // @[Protocol.scala:134:35]
wire [3:0] _rerocc_buffer_auto_out_req_bits_client_id; // @[Protocol.scala:134:35]
wire _rerocc_buffer_auto_out_req_bits_manager_id; // @[Protocol.scala:134:35]
wire [63:0] _rerocc_buffer_auto_out_req_bits_data; // @[Protocol.scala:134:35]
wire _rerocc_buffer_auto_out_resp_ready; // @[Protocol.scala:134:35]
wire _rerocc_manager_auto_in_req_ready; // @[Manager.scala:209:34]
wire _rerocc_manager_auto_in_resp_valid; // @[Manager.scala:209:34]
wire [2:0] _rerocc_manager_auto_in_resp_bits_opcode; // @[Manager.scala:209:34]
wire [3:0] _rerocc_manager_auto_in_resp_bits_client_id; // @[Manager.scala:209:34]
wire _rerocc_manager_auto_in_resp_bits_manager_id; // @[Manager.scala:209:34]
wire [63:0] _rerocc_manager_auto_in_resp_bits_data; // @[Manager.scala:209:34]
wire [3:0] _rerocc_manager_io_ptw_ptbr_mode; // @[Manager.scala:209:34]
wire [15:0] _rerocc_manager_io_ptw_ptbr_asid; // @[Manager.scala:209:34]
wire [43:0] _rerocc_manager_io_ptw_ptbr_ppn; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_sfence_valid; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_debug; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_cease; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_wfi; // @[Manager.scala:209:34]
wire [31:0] _rerocc_manager_io_ptw_status_isa; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_dprv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_dv; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_prv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_v; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sd; // @[Manager.scala:209:34]
wire [22:0] _rerocc_manager_io_ptw_status_zero2; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mpv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_gva; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mbe; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sbe; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_sxl; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_uxl; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sd_rv32; // @[Manager.scala:209:34]
wire [7:0] _rerocc_manager_io_ptw_status_zero1; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_tsr; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_tw; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_tvm; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mxr; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sum; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mprv; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_xs; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_fs; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_mpp; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_vs; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_spp; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mpie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_ube; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_spie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_upie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_hie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_uie; // @[Manager.scala:209:34]
wire _accumulator_cmd_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [6:0] _accumulator_cmd_q_io_deq_bits_inst_funct; // @[Decoupled.scala:362:21]
wire _accumulator_cmd_q_io_deq_bits_inst_xd; // @[Decoupled.scala:362:21]
wire [63:0] _accumulator_cmd_q_io_deq_bits_rs1; // @[Decoupled.scala:362:21]
wire [63:0] _accumulator_cmd_q_io_deq_bits_rs2; // @[Decoupled.scala:362:21]
wire [63:0] _regfile_ext_R0_data; // @[LazyRoCC.scala:124:20]
wire auto_ctrl_ctrl_in_a_valid_0 = auto_ctrl_ctrl_in_a_valid; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_a_bits_opcode_0 = auto_ctrl_ctrl_in_a_bits_opcode; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_a_bits_param_0 = auto_ctrl_ctrl_in_a_bits_param; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_a_bits_size_0 = auto_ctrl_ctrl_in_a_bits_size; // @[Manager.scala:237:34]
wire [6:0] auto_ctrl_ctrl_in_a_bits_source_0 = auto_ctrl_ctrl_in_a_bits_source; // @[Manager.scala:237:34]
wire [11:0] auto_ctrl_ctrl_in_a_bits_address_0 = auto_ctrl_ctrl_in_a_bits_address; // @[Manager.scala:237:34]
wire [7:0] auto_ctrl_ctrl_in_a_bits_mask_0 = auto_ctrl_ctrl_in_a_bits_mask; // @[Manager.scala:237:34]
wire [63:0] auto_ctrl_ctrl_in_a_bits_data_0 = auto_ctrl_ctrl_in_a_bits_data; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_a_bits_corrupt_0 = auto_ctrl_ctrl_in_a_bits_corrupt; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_d_ready_0 = auto_ctrl_ctrl_in_d_ready; // @[Manager.scala:237:34]
wire auto_buffer_out_a_ready_0 = auto_buffer_out_a_ready; // @[Manager.scala:237:34]
wire auto_buffer_out_b_valid_0 = auto_buffer_out_b_valid; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_b_bits_opcode_0 = auto_buffer_out_b_bits_opcode; // @[Manager.scala:237:34]
wire [1:0] auto_buffer_out_b_bits_param_0 = auto_buffer_out_b_bits_param; // @[Manager.scala:237:34]
wire [3:0] auto_buffer_out_b_bits_size_0 = auto_buffer_out_b_bits_size; // @[Manager.scala:237:34]
wire auto_buffer_out_b_bits_source_0 = auto_buffer_out_b_bits_source; // @[Manager.scala:237:34]
wire [31:0] auto_buffer_out_b_bits_address_0 = auto_buffer_out_b_bits_address; // @[Manager.scala:237:34]
wire [7:0] auto_buffer_out_b_bits_mask_0 = auto_buffer_out_b_bits_mask; // @[Manager.scala:237:34]
wire [63:0] auto_buffer_out_b_bits_data_0 = auto_buffer_out_b_bits_data; // @[Manager.scala:237:34]
wire auto_buffer_out_b_bits_corrupt_0 = auto_buffer_out_b_bits_corrupt; // @[Manager.scala:237:34]
wire auto_buffer_out_c_ready_0 = auto_buffer_out_c_ready; // @[Manager.scala:237:34]
wire auto_buffer_out_d_valid_0 = auto_buffer_out_d_valid; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_d_bits_opcode_0 = auto_buffer_out_d_bits_opcode; // @[Manager.scala:237:34]
wire [1:0] auto_buffer_out_d_bits_param_0 = auto_buffer_out_d_bits_param; // @[Manager.scala:237:34]
wire [3:0] auto_buffer_out_d_bits_size_0 = auto_buffer_out_d_bits_size; // @[Manager.scala:237:34]
wire auto_buffer_out_d_bits_source_0 = auto_buffer_out_d_bits_source; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_d_bits_sink_0 = auto_buffer_out_d_bits_sink; // @[Manager.scala:237:34]
wire auto_buffer_out_d_bits_denied_0 = auto_buffer_out_d_bits_denied; // @[Manager.scala:237:34]
wire [63:0] auto_buffer_out_d_bits_data_0 = auto_buffer_out_d_bits_data; // @[Manager.scala:237:34]
wire auto_buffer_out_d_bits_corrupt_0 = auto_buffer_out_d_bits_corrupt; // @[Manager.scala:237:34]
wire auto_buffer_out_e_ready_0 = auto_buffer_out_e_ready; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_req_valid_0 = auto_re_ro_cc_in_req_valid; // @[Manager.scala:237:34]
wire [2:0] auto_re_ro_cc_in_req_bits_opcode_0 = auto_re_ro_cc_in_req_bits_opcode; // @[Manager.scala:237:34]
wire [3:0] auto_re_ro_cc_in_req_bits_client_id_0 = auto_re_ro_cc_in_req_bits_client_id; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_req_bits_manager_id_0 = auto_re_ro_cc_in_req_bits_manager_id; // @[Manager.scala:237:34]
wire [63:0] auto_re_ro_cc_in_req_bits_data_0 = auto_re_ro_cc_in_req_bits_data; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_resp_ready_0 = auto_re_ro_cc_in_resp_ready; // @[Manager.scala:237:34]
wire [6:0] auto_rerocc_manager_id_sink_in_0 = auto_rerocc_manager_id_sink_in; // @[Manager.scala:237:34]
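// Routing predicates: this crossbar has exactly one input and one output edge, so
// every xbar request/filter term below constant-folds to 1'h1 and the A/B/C/D/E
// channels are effectively passed straight through.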
wire xbar__requestAIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire xbar_requestAIO_0_0 = 1'h1; // @[Xbar.scala:307:107]
wire xbar__requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire xbar_requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107]
wire xbar__requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire xbar__requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire xbar__requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire xbar__requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire xbar_requestBOI_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire xbar__requestDOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire xbar__requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire xbar__requestDOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire xbar__requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire xbar_requestDOI_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire xbar__requestEIO_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire xbar__requestEIO_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire xbar__requestEIO_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire xbar__requestEIO_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire xbar_requestEIO_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire xbar__portsAOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire xbar__portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire xbar__portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire xbar__portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire xbar__portsEOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
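// Constant tie-offs: the accumulator RoCC leaves its FPU port and most HellaCache
// side-band/response fields unused, and the corrupt/denied bits on the ctrl and xbar
// bundles are never asserted, so the wires below are driven with literal constants.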
wire [2:0] accumulator_io_fpu_req_bits_rm = 3'h0; // @[LazyRoCC.scala:122:7]
wire [64:0] accumulator_io_fpu_req_bits_in1 = 65'h0; // @[LazyRoCC.scala:122:7]
wire [64:0] accumulator_io_fpu_req_bits_in2 = 65'h0; // @[LazyRoCC.scala:122:7]
wire [64:0] accumulator_io_fpu_req_bits_in3 = 65'h0; // @[LazyRoCC.scala:122:7]
wire [64:0] accumulator_io_fpu_resp_bits_data = 65'h0; // @[LazyRoCC.scala:122:7]
wire [32:0] xbar__requestAIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] xbar__requestAIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] xbar__requestCIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] xbar__requestCIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [8:0] xbar_beatsBO_0 = 9'h0; // @[Edges.scala:221:14]
wire [39:0] accumulator_io_mem_s2_gpa = 40'h0; // @[LazyRoCC.scala:122:7]
wire [31:0] accumulator_io_mem_s2_paddr = 32'h0; // @[LazyRoCC.scala:122:7]
wire [4:0] accumulator_io_mem_req_bits_cmd = 5'h0; // @[LazyRoCC.scala:122:7]
wire [4:0] accumulator_io_fpu_resp_bits_exc = 5'h0; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_mem_req_bits_size = 2'h3; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_mem_req_bits_data = 64'h0; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_mem_s1_data_data = 64'h0; // @[LazyRoCC.scala:122:7]
wire [7:0] accumulator_io_mem_req_bits_mask = 8'h0; // @[LazyRoCC.scala:122:7]
wire [7:0] accumulator_io_mem_s1_data_mask = 8'h0; // @[LazyRoCC.scala:122:7]
wire auto_ctrl_ctrl_in_d_bits_sink = 1'h0; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_d_bits_denied = 1'h0; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_d_bits_corrupt = 1'h0; // @[Manager.scala:237:34]
wire accumulator_io_mem_req_bits_signed = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_req_bits_phys = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_req_bits_no_resp = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_req_bits_no_alloc = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_req_bits_no_xcpt = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s1_kill = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_nack = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_nack_cause_raw = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_kill = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_uncached = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_replay_next = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_xcpt_ma_ld = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_xcpt_ma_st = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_xcpt_pf_ld = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_xcpt_pf_st = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_xcpt_gf_ld = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_xcpt_gf_st = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_xcpt_ae_ld = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_xcpt_ae_st = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_s2_gpa_is_pte = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_ordered = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_store_pending = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_acquire = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_release = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_grant = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_tlbMiss = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_blocked = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_canAcceptStoreThenLoad = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_canAcceptStoreThenRMW = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_canAcceptLoadThenLoad = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_storeBufferEmptyAfterLoad = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_perf_storeBufferEmptyAfterStore = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_keep_clock_enabled = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_clock_enabled = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_interrupt = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_exception = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_ready = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_valid = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_ldst = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_wen = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_ren1 = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_ren2 = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_ren3 = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_swap12 = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_swap23 = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_fromint = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_toint = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_fastpipe = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_fma = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_div = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_sqrt = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_wflags = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_req_bits_vec = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_resp_ready = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator_io_fpu_resp_valid = 1'h0; // @[LazyRoCC.scala:122:7]
wire accumulator__busy_WIRE_0 = 1'h0; // @[LazyRoCC.scala:125:29]
wire accumulator__busy_WIRE_1 = 1'h0; // @[LazyRoCC.scala:125:29]
wire accumulator__busy_WIRE_2 = 1'h0; // @[LazyRoCC.scala:125:29]
wire accumulator__busy_WIRE_3 = 1'h0; // @[LazyRoCC.scala:125:29]
wire xbar_auto_anon_in_a_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_c_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_a_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_c_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire xbar_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire xbar_anonOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire xbar_anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire xbar_anonIn_c_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire xbar_in_0_a_bits_corrupt = 1'h0; // @[Xbar.scala:159:18]
wire xbar_in_0_c_bits_corrupt = 1'h0; // @[Xbar.scala:159:18]
wire xbar_out_0_a_bits_corrupt = 1'h0; // @[Xbar.scala:216:19]
wire xbar_out_0_c_bits_corrupt = 1'h0; // @[Xbar.scala:216:19]
wire xbar__requestBOI_T = 1'h0; // @[Parameters.scala:54:10]
wire xbar__requestDOI_T = 1'h0; // @[Parameters.scala:54:10]
wire xbar__requestEIO_T = 1'h0; // @[Parameters.scala:54:10]
wire xbar_portsAOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire xbar_portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire widget_auto_anon_in_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_c_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_c_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire widget_anonOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire widget_anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire widget_anonIn_c_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire [1:0] auto_ctrl_ctrl_in_d_bits_param = 2'h0; // @[Manager.scala:237:34]
wire [1:0] accumulator_io_fpu_req_bits_typeTagIn = 2'h0; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_fpu_req_bits_typeTagOut = 2'h0; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_fpu_req_bits_fmaCmd = 2'h0; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_fpu_req_bits_typ = 2'h0; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_fpu_req_bits_fmt = 2'h0; // @[LazyRoCC.scala:122:7]
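// reRoCC request/response channel: the reRoCCNodeIn bundle below simply mirrors the
// auto_re_ro_cc_in_* ports, and reroccManagerIdSinkNodeIn forwards the manager-id sink input.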
wire reRoCCNodeIn_req_ready; // @[MixedNode.scala:551:17]
wire reRoCCNodeIn_req_valid = auto_re_ro_cc_in_req_valid_0; // @[Manager.scala:237:34]
wire [2:0] reRoCCNodeIn_req_bits_opcode = auto_re_ro_cc_in_req_bits_opcode_0; // @[Manager.scala:237:34]
wire [3:0] reRoCCNodeIn_req_bits_client_id = auto_re_ro_cc_in_req_bits_client_id_0; // @[Manager.scala:237:34]
wire reRoCCNodeIn_req_bits_manager_id = auto_re_ro_cc_in_req_bits_manager_id_0; // @[Manager.scala:237:34]
wire [63:0] reRoCCNodeIn_req_bits_data = auto_re_ro_cc_in_req_bits_data_0; // @[Manager.scala:237:34]
wire reRoCCNodeIn_resp_ready = auto_re_ro_cc_in_resp_ready_0; // @[Manager.scala:237:34]
wire reRoCCNodeIn_resp_valid; // @[MixedNode.scala:551:17]
wire [2:0] reRoCCNodeIn_resp_bits_opcode; // @[MixedNode.scala:551:17]
wire [3:0] reRoCCNodeIn_resp_bits_client_id; // @[MixedNode.scala:551:17]
wire reRoCCNodeIn_resp_bits_manager_id; // @[MixedNode.scala:551:17]
wire [63:0] reRoCCNodeIn_resp_bits_data; // @[MixedNode.scala:551:17]
wire [6:0] reroccManagerIdSinkNodeIn = auto_rerocc_manager_id_sink_in_0; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_a_ready_0; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_d_bits_opcode_0; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_d_bits_size_0; // @[Manager.scala:237:34]
wire [6:0] auto_ctrl_ctrl_in_d_bits_source_0; // @[Manager.scala:237:34]
wire [63:0] auto_ctrl_ctrl_in_d_bits_data_0; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_d_valid_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_a_bits_opcode_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_a_bits_param_0; // @[Manager.scala:237:34]
wire [3:0] auto_buffer_out_a_bits_size_0; // @[Manager.scala:237:34]
wire auto_buffer_out_a_bits_source_0; // @[Manager.scala:237:34]
wire [31:0] auto_buffer_out_a_bits_address_0; // @[Manager.scala:237:34]
wire [7:0] auto_buffer_out_a_bits_mask_0; // @[Manager.scala:237:34]
wire [63:0] auto_buffer_out_a_bits_data_0; // @[Manager.scala:237:34]
wire auto_buffer_out_a_bits_corrupt_0; // @[Manager.scala:237:34]
wire auto_buffer_out_a_valid_0; // @[Manager.scala:237:34]
wire auto_buffer_out_b_ready_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_c_bits_opcode_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_c_bits_param_0; // @[Manager.scala:237:34]
wire [3:0] auto_buffer_out_c_bits_size_0; // @[Manager.scala:237:34]
wire auto_buffer_out_c_bits_source_0; // @[Manager.scala:237:34]
wire [31:0] auto_buffer_out_c_bits_address_0; // @[Manager.scala:237:34]
wire [63:0] auto_buffer_out_c_bits_data_0; // @[Manager.scala:237:34]
wire auto_buffer_out_c_bits_corrupt_0; // @[Manager.scala:237:34]
wire auto_buffer_out_c_valid_0; // @[Manager.scala:237:34]
wire auto_buffer_out_d_ready_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_e_bits_sink_0; // @[Manager.scala:237:34]
wire auto_buffer_out_e_valid_0; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_req_ready_0; // @[Manager.scala:237:34]
wire [2:0] auto_re_ro_cc_in_resp_bits_opcode_0; // @[Manager.scala:237:34]
wire [3:0] auto_re_ro_cc_in_resp_bits_client_id_0; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_resp_bits_manager_id_0; // @[Manager.scala:237:34]
wire [63:0] auto_re_ro_cc_in_resp_bits_data_0; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_resp_valid_0; // @[Manager.scala:237:34]
wire accumulator__io_resp_valid_T_4; // @[LazyRoCC.scala:164:53]
wire accumulator__io_mem_req_valid_T_4; // @[LazyRoCC.scala:177:56]
wire accumulator__io_busy_T_3; // @[LazyRoCC.scala:171:24]
wire [6:0] accumulator_io_cmd_bits_inst_funct; // @[LazyRoCC.scala:122:7]
wire [4:0] accumulator_io_cmd_bits_inst_rs2; // @[LazyRoCC.scala:122:7]
wire [4:0] accumulator_io_cmd_bits_inst_rs1; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_inst_xd; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_inst_xs1; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_inst_xs2; // @[LazyRoCC.scala:122:7]
wire [4:0] accumulator_io_cmd_bits_inst_rd; // @[LazyRoCC.scala:122:7]
wire [6:0] accumulator_io_cmd_bits_inst_opcode; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_debug; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_cease; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_wfi; // @[LazyRoCC.scala:122:7]
wire [31:0] accumulator_io_cmd_bits_status_isa; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_cmd_bits_status_dprv; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_dv; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_cmd_bits_status_prv; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_v; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_sd; // @[LazyRoCC.scala:122:7]
wire [22:0] accumulator_io_cmd_bits_status_zero2; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_mpv; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_gva; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_mbe; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_sbe; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_cmd_bits_status_sxl; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_cmd_bits_status_uxl; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_sd_rv32; // @[LazyRoCC.scala:122:7]
wire [7:0] accumulator_io_cmd_bits_status_zero1; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_tsr; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_tw; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_tvm; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_mxr; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_sum; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_mprv; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_cmd_bits_status_xs; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_cmd_bits_status_fs; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_cmd_bits_status_mpp; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_cmd_bits_status_vs; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_spp; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_mpie; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_ube; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_spie; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_upie; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_mie; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_hie; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_sie; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_bits_status_uie; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_cmd_bits_rs1; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_cmd_bits_rs2; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_ready; // @[LazyRoCC.scala:122:7]
wire accumulator_io_cmd_valid; // @[LazyRoCC.scala:122:7]
wire [4:0] accumulator_io_resp_bits_rd; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_resp_bits_data; // @[LazyRoCC.scala:122:7]
wire accumulator_io_resp_ready; // @[LazyRoCC.scala:122:7]
wire accumulator_io_resp_valid; // @[LazyRoCC.scala:122:7]
wire [39:0] accumulator_io_mem_req_bits_addr; // @[LazyRoCC.scala:122:7]
wire [7:0] accumulator_io_mem_req_bits_tag; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_mem_req_bits_dprv; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_req_bits_dv; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_req_ready; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_req_valid; // @[LazyRoCC.scala:122:7]
wire [39:0] accumulator_io_mem_resp_bits_addr; // @[LazyRoCC.scala:122:7]
wire [7:0] accumulator_io_mem_resp_bits_tag; // @[LazyRoCC.scala:122:7]
wire [4:0] accumulator_io_mem_resp_bits_cmd; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_mem_resp_bits_size; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_resp_bits_signed; // @[LazyRoCC.scala:122:7]
wire [1:0] accumulator_io_mem_resp_bits_dprv; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_resp_bits_dv; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_mem_resp_bits_data; // @[LazyRoCC.scala:122:7]
wire [7:0] accumulator_io_mem_resp_bits_mask; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_resp_bits_replay; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_resp_bits_has_data; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_mem_resp_bits_data_word_bypass; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_mem_resp_bits_data_raw; // @[LazyRoCC.scala:122:7]
wire [63:0] accumulator_io_mem_resp_bits_store_data; // @[LazyRoCC.scala:122:7]
wire accumulator_io_mem_resp_valid; // @[LazyRoCC.scala:122:7]
wire accumulator_io_busy; // @[LazyRoCC.scala:122:7]
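// Accumulator example RoCC (LazyRoCC.scala): a 4-entry register file with per-entry
// busy bits. The funct field of the queued command selects the operation
// (0 = write rs1, 1 = read, 2 = load from memory, 3 = accumulate rs1 into the entry),
// and wdata muxes between rs1 and regfile(addr) + rs1. Roughly the Chisel being
// lowered here is (sketch only, names approximate):
//   val doWrite = cmd.bits.inst.funct === 0.U
//   val doRead  = cmd.bits.inst.funct === 1.U
//   val doLoad  = cmd.bits.inst.funct === 2.U
//   val doAccum = cmd.bits.inst.funct === 3.U
//   val wdata   = Mux(doWrite, cmd.bits.rs1, regfile(addr) + cmd.bits.rs1)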
reg accumulator_busy_0; // @[LazyRoCC.scala:125:21]
reg accumulator_busy_1; // @[LazyRoCC.scala:125:21]
reg accumulator_busy_2; // @[LazyRoCC.scala:125:21]
reg accumulator_busy_3; // @[LazyRoCC.scala:125:21]
wire [1:0] accumulator_addr = _accumulator_cmd_q_io_deq_bits_rs2[1:0]; // @[Decoupled.scala:362:21]
wire accumulator_doWrite = _accumulator_cmd_q_io_deq_bits_inst_funct == 7'h0; // @[Decoupled.scala:362:21]
wire accumulator_doRead = _accumulator_cmd_q_io_deq_bits_inst_funct == 7'h1; // @[Decoupled.scala:362:21]
wire accumulator_doLoad = _accumulator_cmd_q_io_deq_bits_inst_funct == 7'h2; // @[Decoupled.scala:362:21]
wire accumulator_doAccum = _accumulator_cmd_q_io_deq_bits_inst_funct == 7'h3; // @[Decoupled.scala:362:21]
wire [1:0] accumulator_memRespTag = accumulator_io_mem_resp_bits_tag[1:0]; // @[LazyRoCC.scala:122:7, :134:40]
wire [64:0] accumulator__wdata_T = {1'h0, _regfile_ext_R0_data} + {1'h0, _accumulator_cmd_q_io_deq_bits_rs1}; // @[Decoupled.scala:362:21]
wire [63:0] accumulator__wdata_T_1 = accumulator__wdata_T[63:0]; // @[LazyRoCC.scala:139:42]
wire [63:0] accumulator_wdata = accumulator_doWrite ? _accumulator_cmd_q_io_deq_bits_rs1 : accumulator__wdata_T_1; // @[Decoupled.scala:362:21]
wire accumulator__q_io_deq_ready_T_4; // @[LazyRoCC.scala:160:40]
wire accumulator__stallLoad_T = ~accumulator_io_mem_req_ready; // @[LazyRoCC.scala:122:7, :157:29]
wire accumulator_stallLoad = accumulator_doLoad & accumulator__stallLoad_T; // @[LazyRoCC.scala:132:22, :157:{26,29}]
wire accumulator__stallResp_T = ~accumulator_io_resp_ready; // @[LazyRoCC.scala:122:7, :158:29]
wire accumulator_stallResp = _accumulator_cmd_q_io_deq_bits_inst_xd & accumulator__stallResp_T; // @[Decoupled.scala:362:21]
wire [3:0] _GEN = {{accumulator_busy_3}, {accumulator_busy_2}, {accumulator_busy_1}, {accumulator_busy_0}}; // @[LazyRoCC.scala:125:21, :160:16]
wire accumulator__q_io_deq_ready_T = ~_GEN[accumulator_addr]; // @[LazyRoCC.scala:129:26, :160:16]
wire accumulator__q_io_deq_ready_T_1 = ~accumulator_stallLoad; // @[LazyRoCC.scala:157:26, :160:29]
wire accumulator__q_io_deq_ready_T_2 = accumulator__q_io_deq_ready_T & accumulator__q_io_deq_ready_T_1; // @[LazyRoCC.scala:160:{16,26,29}]
wire accumulator__q_io_deq_ready_T_3 = ~accumulator_stallResp; // @[LazyRoCC.scala:158:26, :160:43]
assign accumulator__q_io_deq_ready_T_4 = accumulator__q_io_deq_ready_T_2 & accumulator__q_io_deq_ready_T_3; // @[LazyRoCC.scala:160:{26,40,43}]
wire accumulator__io_resp_valid_T = _accumulator_cmd_q_io_deq_valid & _accumulator_cmd_q_io_deq_bits_inst_xd; // @[Decoupled.scala:362:21]
wire accumulator__io_resp_valid_T_1 = ~_GEN[accumulator_addr]; // @[LazyRoCC.scala:129:26, :160:16, :164:43]
wire accumulator__io_resp_valid_T_2 = accumulator__io_resp_valid_T & accumulator__io_resp_valid_T_1; // @[LazyRoCC.scala:164:{30,40,43}]
wire accumulator__io_resp_valid_T_3 = ~accumulator_stallLoad; // @[LazyRoCC.scala:157:26, :160:29, :164:56]
assign accumulator__io_resp_valid_T_4 = accumulator__io_resp_valid_T_2 & accumulator__io_resp_valid_T_3; // @[LazyRoCC.scala:164:{40,53,56}]
assign accumulator_io_resp_valid = accumulator__io_resp_valid_T_4; // @[LazyRoCC.scala:122:7, :164:53]
wire accumulator__io_busy_T = accumulator_busy_0 | accumulator_busy_1; // @[LazyRoCC.scala:125:21, :171:40]
wire accumulator__io_busy_T_1 = accumulator__io_busy_T | accumulator_busy_2; // @[LazyRoCC.scala:125:21, :171:40]
wire accumulator__io_busy_T_2 = accumulator__io_busy_T_1 | accumulator_busy_3; // @[LazyRoCC.scala:125:21, :171:40]
assign accumulator__io_busy_T_3 = _accumulator_cmd_q_io_deq_valid | accumulator__io_busy_T_2; // @[Decoupled.scala:362:21]
assign accumulator_io_busy = accumulator__io_busy_T_3; // @[LazyRoCC.scala:122:7, :171:24]
wire accumulator__io_mem_req_valid_T = _accumulator_cmd_q_io_deq_valid & accumulator_doLoad; // @[Decoupled.scala:362:21]
wire accumulator__io_mem_req_valid_T_1 = ~_GEN[accumulator_addr]; // @[LazyRoCC.scala:129:26, :160:16, :177:46]
wire accumulator__io_mem_req_valid_T_2 = accumulator__io_mem_req_valid_T & accumulator__io_mem_req_valid_T_1; // @[LazyRoCC.scala:177:{33,43,46}]
wire accumulator__io_mem_req_valid_T_3 = ~accumulator_stallResp; // @[LazyRoCC.scala:158:26, :160:43, :177:59]
assign accumulator__io_mem_req_valid_T_4 = accumulator__io_mem_req_valid_T_2 & accumulator__io_mem_req_valid_T_3; // @[LazyRoCC.scala:177:{43,56,59}]
assign accumulator_io_mem_req_valid = accumulator__io_mem_req_valid_T_4; // @[LazyRoCC.scala:122:7, :177:56]
assign accumulator_io_mem_req_bits_addr = _accumulator_cmd_q_io_deq_bits_rs1[39:0]; // @[Decoupled.scala:362:21]
assign accumulator_io_mem_req_bits_tag = {6'h0, accumulator_addr}; // @[LazyRoCC.scala:122:7, :129:26, :131:22, :179:23]
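// TLXbar wiring: with exactly one client and one manager edge, the code below is pure
// renaming between the anonIn/anonOut node bundles, the in_0/out_0 staging bundles, and
// the filtered per-port views; no arbitration logic is generated.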
wire xbar_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_auto_anon_out_a_ready = xbar_auto_anon_in_a_ready; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire xbar_anonIn_a_valid = xbar_auto_anon_in_a_valid; // @[Xbar.scala:74:9]
wire [2:0] widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] xbar_anonIn_a_bits_opcode = xbar_auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] xbar_anonIn_a_bits_param = xbar_auto_anon_in_a_bits_param; // @[Xbar.scala:74:9]
wire [3:0] widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [3:0] xbar_anonIn_a_bits_size = xbar_auto_anon_in_a_bits_size; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire xbar_anonIn_a_bits_source = xbar_auto_anon_in_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [31:0] xbar_anonIn_a_bits_address = xbar_auto_anon_in_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [7:0] xbar_anonIn_a_bits_mask = xbar_auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire [63:0] xbar_anonIn_a_bits_data = xbar_auto_anon_in_a_bits_data; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_b_ready; // @[WidthWidget.scala:27:9]
wire xbar_anonIn_b_ready = xbar_auto_anon_in_b_ready; // @[Xbar.scala:74:9]
wire xbar_anonIn_b_valid; // @[MixedNode.scala:551:17]
wire [2:0] xbar_anonIn_b_bits_opcode; // @[MixedNode.scala:551:17]
wire widget_auto_anon_out_b_valid = xbar_auto_anon_in_b_valid; // @[Xbar.scala:74:9]
wire [1:0] xbar_anonIn_b_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] widget_auto_anon_out_b_bits_opcode = xbar_auto_anon_in_b_bits_opcode; // @[Xbar.scala:74:9]
wire [3:0] xbar_anonIn_b_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] widget_auto_anon_out_b_bits_param = xbar_auto_anon_in_b_bits_param; // @[Xbar.scala:74:9]
wire xbar_anonIn_b_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] widget_auto_anon_out_b_bits_size = xbar_auto_anon_in_b_bits_size; // @[Xbar.scala:74:9]
wire [31:0] xbar_anonIn_b_bits_address; // @[MixedNode.scala:551:17]
wire widget_auto_anon_out_b_bits_source = xbar_auto_anon_in_b_bits_source; // @[Xbar.scala:74:9]
wire [7:0] xbar_anonIn_b_bits_mask; // @[MixedNode.scala:551:17]
wire [31:0] widget_auto_anon_out_b_bits_address = xbar_auto_anon_in_b_bits_address; // @[Xbar.scala:74:9]
wire [63:0] xbar_anonIn_b_bits_data; // @[MixedNode.scala:551:17]
wire [7:0] widget_auto_anon_out_b_bits_mask = xbar_auto_anon_in_b_bits_mask; // @[Xbar.scala:74:9]
wire xbar_anonIn_b_bits_corrupt; // @[MixedNode.scala:551:17]
wire [63:0] widget_auto_anon_out_b_bits_data = xbar_auto_anon_in_b_bits_data; // @[Xbar.scala:74:9]
wire xbar_anonIn_c_ready; // @[MixedNode.scala:551:17]
wire widget_auto_anon_out_b_bits_corrupt = xbar_auto_anon_in_b_bits_corrupt; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_c_ready = xbar_auto_anon_in_c_ready; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_c_valid; // @[WidthWidget.scala:27:9]
wire xbar_anonIn_c_valid = xbar_auto_anon_in_c_valid; // @[Xbar.scala:74:9]
wire [2:0] widget_auto_anon_out_c_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] xbar_anonIn_c_bits_opcode = xbar_auto_anon_in_c_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] widget_auto_anon_out_c_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] xbar_anonIn_c_bits_param = xbar_auto_anon_in_c_bits_param; // @[Xbar.scala:74:9]
wire [3:0] widget_auto_anon_out_c_bits_size; // @[WidthWidget.scala:27:9]
wire [3:0] xbar_anonIn_c_bits_size = xbar_auto_anon_in_c_bits_size; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_c_bits_source; // @[WidthWidget.scala:27:9]
wire xbar_anonIn_c_bits_source = xbar_auto_anon_in_c_bits_source; // @[Xbar.scala:74:9]
wire [31:0] widget_auto_anon_out_c_bits_address; // @[WidthWidget.scala:27:9]
wire [31:0] xbar_anonIn_c_bits_address = xbar_auto_anon_in_c_bits_address; // @[Xbar.scala:74:9]
wire [63:0] widget_auto_anon_out_c_bits_data; // @[WidthWidget.scala:27:9]
wire [63:0] xbar_anonIn_c_bits_data = xbar_auto_anon_in_c_bits_data; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire xbar_anonIn_d_ready = xbar_auto_anon_in_d_ready; // @[Xbar.scala:74:9]
wire xbar_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] xbar_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire widget_auto_anon_out_d_valid = xbar_auto_anon_in_d_valid; // @[Xbar.scala:74:9]
wire [1:0] xbar_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] widget_auto_anon_out_d_bits_opcode = xbar_auto_anon_in_d_bits_opcode; // @[Xbar.scala:74:9]
wire [3:0] xbar_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] widget_auto_anon_out_d_bits_param = xbar_auto_anon_in_d_bits_param; // @[Xbar.scala:74:9]
wire xbar_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] widget_auto_anon_out_d_bits_size = xbar_auto_anon_in_d_bits_size; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_auto_anon_out_d_bits_source = xbar_auto_anon_in_d_bits_source; // @[Xbar.scala:74:9]
wire xbar_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [2:0] widget_auto_anon_out_d_bits_sink = xbar_auto_anon_in_d_bits_sink; // @[Xbar.scala:74:9]
wire [63:0] xbar_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_auto_anon_out_d_bits_denied = xbar_auto_anon_in_d_bits_denied; // @[Xbar.scala:74:9]
wire xbar_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [63:0] widget_auto_anon_out_d_bits_data = xbar_auto_anon_in_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_anonIn_e_ready; // @[MixedNode.scala:551:17]
wire widget_auto_anon_out_d_bits_corrupt = xbar_auto_anon_in_d_bits_corrupt; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_e_ready = xbar_auto_anon_in_e_ready; // @[Xbar.scala:74:9]
wire widget_auto_anon_out_e_valid; // @[WidthWidget.scala:27:9]
wire xbar_anonIn_e_valid = xbar_auto_anon_in_e_valid; // @[Xbar.scala:74:9]
wire [2:0] widget_auto_anon_out_e_bits_sink; // @[WidthWidget.scala:27:9]
wire [2:0] xbar_anonIn_e_bits_sink = xbar_auto_anon_in_e_bits_sink; // @[Xbar.scala:74:9]
wire xbar_anonOut_a_ready = xbar_auto_anon_out_a_ready; // @[Xbar.scala:74:9]
wire xbar_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] xbar_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] xbar_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] xbar_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire xbar_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] xbar_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] xbar_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] xbar_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire xbar_anonOut_b_ready; // @[MixedNode.scala:542:17]
wire xbar_anonOut_b_valid = xbar_auto_anon_out_b_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonOut_b_bits_opcode = xbar_auto_anon_out_b_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] xbar_anonOut_b_bits_param = xbar_auto_anon_out_b_bits_param; // @[Xbar.scala:74:9]
wire [3:0] xbar_anonOut_b_bits_size = xbar_auto_anon_out_b_bits_size; // @[Xbar.scala:74:9]
wire xbar_anonOut_b_bits_source = xbar_auto_anon_out_b_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_anonOut_b_bits_address = xbar_auto_anon_out_b_bits_address; // @[Xbar.scala:74:9]
wire [7:0] xbar_anonOut_b_bits_mask = xbar_auto_anon_out_b_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] xbar_anonOut_b_bits_data = xbar_auto_anon_out_b_bits_data; // @[Xbar.scala:74:9]
wire xbar_anonOut_b_bits_corrupt = xbar_auto_anon_out_b_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_anonOut_c_ready = xbar_auto_anon_out_c_ready; // @[Xbar.scala:74:9]
wire xbar_anonOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] xbar_anonOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] xbar_anonOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] xbar_anonOut_c_bits_size; // @[MixedNode.scala:542:17]
wire xbar_anonOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] xbar_anonOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] xbar_anonOut_c_bits_data; // @[MixedNode.scala:542:17]
wire xbar_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire xbar_anonOut_d_valid = xbar_auto_anon_out_d_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonOut_d_bits_opcode = xbar_auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] xbar_anonOut_d_bits_param = xbar_auto_anon_out_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] xbar_anonOut_d_bits_size = xbar_auto_anon_out_d_bits_size; // @[Xbar.scala:74:9]
wire xbar_anonOut_d_bits_source = xbar_auto_anon_out_d_bits_source; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonOut_d_bits_sink = xbar_auto_anon_out_d_bits_sink; // @[Xbar.scala:74:9]
wire xbar_anonOut_d_bits_denied = xbar_auto_anon_out_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] xbar_anonOut_d_bits_data = xbar_auto_anon_out_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_anonOut_d_bits_corrupt = xbar_auto_anon_out_d_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_anonOut_e_ready = xbar_auto_anon_out_e_ready; // @[Xbar.scala:74:9]
wire xbar_anonOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] xbar_anonOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire [2:0] xbar_auto_anon_out_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_a_bits_param; // @[Xbar.scala:74:9]
wire [3:0] xbar_auto_anon_out_a_bits_size; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_auto_anon_out_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] xbar_auto_anon_out_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_out_a_bits_data; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_a_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_b_ready; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_c_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_c_bits_param; // @[Xbar.scala:74:9]
wire [3:0] xbar_auto_anon_out_c_bits_size; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_c_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_auto_anon_out_c_bits_address; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_out_c_bits_data; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_c_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_ready; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_e_bits_sink; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_e_valid; // @[Xbar.scala:74:9]
wire xbar_out_0_a_ready = xbar_anonOut_a_ready; // @[Xbar.scala:216:19]
wire xbar_out_0_a_valid; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_valid = xbar_anonOut_a_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_opcode = xbar_anonOut_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_out_0_a_bits_param; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_param = xbar_anonOut_a_bits_param; // @[Xbar.scala:74:9]
wire [3:0] xbar_out_0_a_bits_size; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_size = xbar_anonOut_a_bits_size; // @[Xbar.scala:74:9]
wire xbar_out_0_a_bits_source; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_source = xbar_anonOut_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_out_0_a_bits_address; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_address = xbar_anonOut_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] xbar_out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_mask = xbar_anonOut_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] xbar_out_0_a_bits_data; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_data = xbar_anonOut_a_bits_data; // @[Xbar.scala:74:9]
wire xbar_out_0_b_ready; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_b_ready = xbar_anonOut_b_ready; // @[Xbar.scala:74:9]
wire xbar_out_0_b_valid = xbar_anonOut_b_valid; // @[Xbar.scala:216:19]
wire [2:0] xbar_out_0_b_bits_opcode = xbar_anonOut_b_bits_opcode; // @[Xbar.scala:216:19]
wire [1:0] xbar_out_0_b_bits_param = xbar_anonOut_b_bits_param; // @[Xbar.scala:216:19]
wire [3:0] xbar_out_0_b_bits_size = xbar_anonOut_b_bits_size; // @[Xbar.scala:216:19]
wire xbar_out_0_b_bits_source = xbar_anonOut_b_bits_source; // @[Xbar.scala:216:19]
wire [31:0] xbar_out_0_b_bits_address = xbar_anonOut_b_bits_address; // @[Xbar.scala:216:19]
wire [7:0] xbar_out_0_b_bits_mask = xbar_anonOut_b_bits_mask; // @[Xbar.scala:216:19]
wire [63:0] xbar_out_0_b_bits_data = xbar_anonOut_b_bits_data; // @[Xbar.scala:216:19]
wire xbar_out_0_b_bits_corrupt = xbar_anonOut_b_bits_corrupt; // @[Xbar.scala:216:19]
wire xbar_out_0_c_ready = xbar_anonOut_c_ready; // @[Xbar.scala:216:19]
wire xbar_out_0_c_valid; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_c_valid = xbar_anonOut_c_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_out_0_c_bits_opcode; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_c_bits_opcode = xbar_anonOut_c_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_out_0_c_bits_param; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_c_bits_param = xbar_anonOut_c_bits_param; // @[Xbar.scala:74:9]
wire [3:0] xbar_out_0_c_bits_size; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_c_bits_size = xbar_anonOut_c_bits_size; // @[Xbar.scala:74:9]
wire xbar_out_0_c_bits_source; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_c_bits_source = xbar_anonOut_c_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_out_0_c_bits_address; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_c_bits_address = xbar_anonOut_c_bits_address; // @[Xbar.scala:74:9]
wire [63:0] xbar_out_0_c_bits_data; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_c_bits_data = xbar_anonOut_c_bits_data; // @[Xbar.scala:74:9]
wire xbar_out_0_d_ready; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_d_ready = xbar_anonOut_d_ready; // @[Xbar.scala:74:9]
wire xbar_out_0_d_valid = xbar_anonOut_d_valid; // @[Xbar.scala:216:19]
wire [2:0] xbar_out_0_d_bits_opcode = xbar_anonOut_d_bits_opcode; // @[Xbar.scala:216:19]
wire [1:0] xbar_out_0_d_bits_param = xbar_anonOut_d_bits_param; // @[Xbar.scala:216:19]
wire [3:0] xbar_out_0_d_bits_size = xbar_anonOut_d_bits_size; // @[Xbar.scala:216:19]
wire xbar_out_0_d_bits_source = xbar_anonOut_d_bits_source; // @[Xbar.scala:216:19]
wire [2:0] xbar__out_0_d_bits_sink_T = xbar_anonOut_d_bits_sink; // @[Xbar.scala:251:53]
wire xbar_out_0_d_bits_denied = xbar_anonOut_d_bits_denied; // @[Xbar.scala:216:19]
wire [63:0] xbar_out_0_d_bits_data = xbar_anonOut_d_bits_data; // @[Xbar.scala:216:19]
wire xbar_out_0_d_bits_corrupt = xbar_anonOut_d_bits_corrupt; // @[Xbar.scala:216:19]
wire xbar_out_0_e_ready = xbar_anonOut_e_ready; // @[Xbar.scala:216:19]
wire xbar_out_0_e_valid; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_e_valid = xbar_anonOut_e_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar__anonOut_e_bits_sink_T; // @[Xbar.scala:156:69]
assign xbar_auto_anon_out_e_bits_sink = xbar_anonOut_e_bits_sink; // @[Xbar.scala:74:9]
wire xbar_in_0_a_ready; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_a_ready = xbar_anonIn_a_ready; // @[Xbar.scala:74:9]
wire xbar_in_0_a_valid = xbar_anonIn_a_valid; // @[Xbar.scala:159:18]
wire [2:0] xbar_in_0_a_bits_opcode = xbar_anonIn_a_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] xbar_in_0_a_bits_param = xbar_anonIn_a_bits_param; // @[Xbar.scala:159:18]
wire [3:0] xbar_in_0_a_bits_size = xbar_anonIn_a_bits_size; // @[Xbar.scala:159:18]
wire xbar__in_0_a_bits_source_T = xbar_anonIn_a_bits_source; // @[Xbar.scala:166:55]
wire [31:0] xbar_in_0_a_bits_address = xbar_anonIn_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] xbar_in_0_a_bits_mask = xbar_anonIn_a_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] xbar_in_0_a_bits_data = xbar_anonIn_a_bits_data; // @[Xbar.scala:159:18]
wire xbar_in_0_b_ready = xbar_anonIn_b_ready; // @[Xbar.scala:159:18]
wire xbar_in_0_b_valid; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_b_valid = xbar_anonIn_b_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_in_0_b_bits_opcode; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_b_bits_opcode = xbar_anonIn_b_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] xbar_in_0_b_bits_param; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_b_bits_param = xbar_anonIn_b_bits_param; // @[Xbar.scala:74:9]
wire [3:0] xbar_in_0_b_bits_size; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_b_bits_size = xbar_anonIn_b_bits_size; // @[Xbar.scala:74:9]
wire xbar__anonIn_b_bits_source_T; // @[Xbar.scala:156:69]
assign xbar_auto_anon_in_b_bits_source = xbar_anonIn_b_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_in_0_b_bits_address; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_b_bits_address = xbar_anonIn_b_bits_address; // @[Xbar.scala:74:9]
wire [7:0] xbar_in_0_b_bits_mask; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_b_bits_mask = xbar_anonIn_b_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] xbar_in_0_b_bits_data; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_b_bits_data = xbar_anonIn_b_bits_data; // @[Xbar.scala:74:9]
wire xbar_in_0_b_bits_corrupt; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_b_bits_corrupt = xbar_anonIn_b_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_in_0_c_ready; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_c_ready = xbar_anonIn_c_ready; // @[Xbar.scala:74:9]
wire xbar_in_0_c_valid = xbar_anonIn_c_valid; // @[Xbar.scala:159:18]
wire [2:0] xbar_in_0_c_bits_opcode = xbar_anonIn_c_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] xbar_in_0_c_bits_param = xbar_anonIn_c_bits_param; // @[Xbar.scala:159:18]
wire [3:0] xbar_in_0_c_bits_size = xbar_anonIn_c_bits_size; // @[Xbar.scala:159:18]
wire xbar__in_0_c_bits_source_T = xbar_anonIn_c_bits_source; // @[Xbar.scala:187:55]
wire [31:0] xbar_in_0_c_bits_address = xbar_anonIn_c_bits_address; // @[Xbar.scala:159:18]
wire [63:0] xbar_in_0_c_bits_data = xbar_anonIn_c_bits_data; // @[Xbar.scala:159:18]
wire xbar_in_0_d_ready = xbar_anonIn_d_ready; // @[Xbar.scala:159:18]
wire xbar_in_0_d_valid; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_valid = xbar_anonIn_d_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_opcode = xbar_anonIn_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] xbar_in_0_d_bits_param; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_param = xbar_anonIn_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] xbar_in_0_d_bits_size; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_size = xbar_anonIn_d_bits_size; // @[Xbar.scala:74:9]
wire xbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign xbar_auto_anon_in_d_bits_source = xbar_anonIn_d_bits_source; // @[Xbar.scala:74:9]
wire [2:0] xbar_in_0_d_bits_sink; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_sink = xbar_anonIn_d_bits_sink; // @[Xbar.scala:74:9]
wire xbar_in_0_d_bits_denied; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_denied = xbar_anonIn_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] xbar_in_0_d_bits_data; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_data = xbar_anonIn_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_corrupt = xbar_anonIn_d_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_in_0_e_ready; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_e_ready = xbar_anonIn_e_ready; // @[Xbar.scala:74:9]
wire xbar_in_0_e_valid = xbar_anonIn_e_valid; // @[Xbar.scala:159:18]
wire [2:0] xbar_in_0_e_bits_sink = xbar_anonIn_e_bits_sink; // @[Xbar.scala:159:18]
wire xbar_portsAOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign xbar_anonIn_a_ready = xbar_in_0_a_ready; // @[Xbar.scala:159:18]
wire xbar__portsAOI_filtered_0_valid_T_1 = xbar_in_0_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] xbar_portsAOI_filtered_0_bits_opcode = xbar_in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] xbar_portsAOI_filtered_0_bits_param = xbar_in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [3:0] xbar_portsAOI_filtered_0_bits_size = xbar_in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsAOI_filtered_0_bits_source = xbar_in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] xbar__requestAIO_T = xbar_in_0_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] xbar_portsAOI_filtered_0_bits_address = xbar_in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [7:0] xbar_portsAOI_filtered_0_bits_mask = xbar_in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire [63:0] xbar_portsAOI_filtered_0_bits_data = xbar_in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsBIO_filtered_0_ready = xbar_in_0_b_ready; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsBIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign xbar_anonIn_b_valid = xbar_in_0_b_valid; // @[Xbar.scala:159:18]
wire [2:0] xbar_portsBIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24]
assign xbar_anonIn_b_bits_opcode = xbar_in_0_b_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] xbar_portsBIO_filtered_0_bits_param; // @[Xbar.scala:352:24]
assign xbar_anonIn_b_bits_param = xbar_in_0_b_bits_param; // @[Xbar.scala:159:18]
wire [3:0] xbar_portsBIO_filtered_0_bits_size; // @[Xbar.scala:352:24]
assign xbar_anonIn_b_bits_size = xbar_in_0_b_bits_size; // @[Xbar.scala:159:18]
wire xbar_portsBIO_filtered_0_bits_source; // @[Xbar.scala:352:24]
assign xbar__anonIn_b_bits_source_T = xbar_in_0_b_bits_source; // @[Xbar.scala:156:69, :159:18]
wire [31:0] xbar_portsBIO_filtered_0_bits_address; // @[Xbar.scala:352:24]
assign xbar_anonIn_b_bits_address = xbar_in_0_b_bits_address; // @[Xbar.scala:159:18]
wire [7:0] xbar_portsBIO_filtered_0_bits_mask; // @[Xbar.scala:352:24]
assign xbar_anonIn_b_bits_mask = xbar_in_0_b_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] xbar_portsBIO_filtered_0_bits_data; // @[Xbar.scala:352:24]
assign xbar_anonIn_b_bits_data = xbar_in_0_b_bits_data; // @[Xbar.scala:159:18]
wire xbar_portsBIO_filtered_0_bits_corrupt; // @[Xbar.scala:352:24]
assign xbar_anonIn_b_bits_corrupt = xbar_in_0_b_bits_corrupt; // @[Xbar.scala:159:18]
wire xbar_portsCOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign xbar_anonIn_c_ready = xbar_in_0_c_ready; // @[Xbar.scala:159:18]
wire xbar__portsCOI_filtered_0_valid_T_1 = xbar_in_0_c_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] xbar_portsCOI_filtered_0_bits_opcode = xbar_in_0_c_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] xbar_portsCOI_filtered_0_bits_param = xbar_in_0_c_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [3:0] xbar_portsCOI_filtered_0_bits_size = xbar_in_0_c_bits_size; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsCOI_filtered_0_bits_source = xbar_in_0_c_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] xbar__requestCIO_T = xbar_in_0_c_bits_address; // @[Xbar.scala:159:18]
wire [31:0] xbar_portsCOI_filtered_0_bits_address = xbar_in_0_c_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [63:0] xbar_portsCOI_filtered_0_bits_data = xbar_in_0_c_bits_data; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsDIO_filtered_0_ready = xbar_in_0_d_ready; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsDIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_valid = xbar_in_0_d_valid; // @[Xbar.scala:159:18]
wire [2:0] xbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_opcode = xbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] xbar_portsDIO_filtered_0_bits_param; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_param = xbar_in_0_d_bits_param; // @[Xbar.scala:159:18]
wire [3:0] xbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_size = xbar_in_0_d_bits_size; // @[Xbar.scala:159:18]
wire xbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:352:24]
assign xbar__anonIn_d_bits_source_T = xbar_in_0_d_bits_source; // @[Xbar.scala:156:69, :159:18]
wire [2:0] xbar_portsDIO_filtered_0_bits_sink; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_sink = xbar_in_0_d_bits_sink; // @[Xbar.scala:159:18]
wire xbar_portsDIO_filtered_0_bits_denied; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_denied = xbar_in_0_d_bits_denied; // @[Xbar.scala:159:18]
wire [63:0] xbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_data = xbar_in_0_d_bits_data; // @[Xbar.scala:159:18]
wire xbar_portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_corrupt = xbar_in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
wire xbar_portsEOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign xbar_anonIn_e_ready = xbar_in_0_e_ready; // @[Xbar.scala:159:18]
wire xbar__portsEOI_filtered_0_valid_T_1 = xbar_in_0_e_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] xbar__requestEIO_uncommonBits_T = xbar_in_0_e_bits_sink; // @[Xbar.scala:159:18]
wire [2:0] xbar_portsEOI_filtered_0_bits_sink = xbar_in_0_e_bits_sink; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_a_bits_source = xbar__in_0_a_bits_source_T; // @[Xbar.scala:159:18, :166:55]
assign xbar_anonIn_b_bits_source = xbar__anonIn_b_bits_source_T; // @[Xbar.scala:156:69]
assign xbar_in_0_c_bits_source = xbar__in_0_c_bits_source_T; // @[Xbar.scala:159:18, :187:55]
assign xbar_anonIn_d_bits_source = xbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign xbar_portsAOI_filtered_0_ready = xbar_out_0_a_ready; // @[Xbar.scala:216:19, :352:24]
wire xbar_portsAOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign xbar_anonOut_a_valid = xbar_out_0_a_valid; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_opcode = xbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_param = xbar_out_0_a_bits_param; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_size = xbar_out_0_a_bits_size; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_source = xbar_out_0_a_bits_source; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_address = xbar_out_0_a_bits_address; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_mask = xbar_out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_data = xbar_out_0_a_bits_data; // @[Xbar.scala:216:19]
assign xbar_anonOut_b_ready = xbar_out_0_b_ready; // @[Xbar.scala:216:19]
wire xbar__portsBIO_filtered_0_valid_T_1 = xbar_out_0_b_valid; // @[Xbar.scala:216:19, :355:40]
assign xbar_portsBIO_filtered_0_bits_opcode = xbar_out_0_b_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsBIO_filtered_0_bits_param = xbar_out_0_b_bits_param; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsBIO_filtered_0_bits_size = xbar_out_0_b_bits_size; // @[Xbar.scala:216:19, :352:24]
wire xbar__requestBOI_uncommonBits_T = xbar_out_0_b_bits_source; // @[Xbar.scala:216:19]
assign xbar_portsBIO_filtered_0_bits_source = xbar_out_0_b_bits_source; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsBIO_filtered_0_bits_address = xbar_out_0_b_bits_address; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsBIO_filtered_0_bits_mask = xbar_out_0_b_bits_mask; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsBIO_filtered_0_bits_data = xbar_out_0_b_bits_data; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsBIO_filtered_0_bits_corrupt = xbar_out_0_b_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsCOI_filtered_0_ready = xbar_out_0_c_ready; // @[Xbar.scala:216:19, :352:24]
wire xbar_portsCOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign xbar_anonOut_c_valid = xbar_out_0_c_valid; // @[Xbar.scala:216:19]
assign xbar_anonOut_c_bits_opcode = xbar_out_0_c_bits_opcode; // @[Xbar.scala:216:19]
assign xbar_anonOut_c_bits_param = xbar_out_0_c_bits_param; // @[Xbar.scala:216:19]
assign xbar_anonOut_c_bits_size = xbar_out_0_c_bits_size; // @[Xbar.scala:216:19]
assign xbar_anonOut_c_bits_source = xbar_out_0_c_bits_source; // @[Xbar.scala:216:19]
assign xbar_anonOut_c_bits_address = xbar_out_0_c_bits_address; // @[Xbar.scala:216:19]
assign xbar_anonOut_c_bits_data = xbar_out_0_c_bits_data; // @[Xbar.scala:216:19]
assign xbar_anonOut_d_ready = xbar_out_0_d_ready; // @[Xbar.scala:216:19]
wire xbar__portsDIO_filtered_0_valid_T_1 = xbar_out_0_d_valid; // @[Xbar.scala:216:19, :355:40]
assign xbar_portsDIO_filtered_0_bits_opcode = xbar_out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_param = xbar_out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_size = xbar_out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24]
wire xbar__requestDOI_uncommonBits_T = xbar_out_0_d_bits_source; // @[Xbar.scala:216:19]
assign xbar_portsDIO_filtered_0_bits_source = xbar_out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_sink = xbar_out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_denied = xbar_out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_data = xbar_out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_corrupt = xbar_out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsEOI_filtered_0_ready = xbar_out_0_e_ready; // @[Xbar.scala:216:19, :352:24]
wire xbar_portsEOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign xbar_anonOut_e_valid = xbar_out_0_e_valid; // @[Xbar.scala:216:19]
assign xbar__anonOut_e_bits_sink_T = xbar_out_0_e_bits_sink; // @[Xbar.scala:156:69, :216:19]
assign xbar_out_0_d_bits_sink = xbar__out_0_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53]
assign xbar_anonOut_e_bits_sink = xbar__anonOut_e_bits_sink_T; // @[Xbar.scala:156:69]
wire [32:0] xbar__requestAIO_T_1 = {1'h0, xbar__requestAIO_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] xbar__requestCIO_T_1 = {1'h0, xbar__requestCIO_T}; // @[Parameters.scala:137:{31,41}]
wire xbar_requestBOI_uncommonBits = xbar__requestBOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire xbar_requestDOI_uncommonBits = xbar__requestDOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [2:0] xbar_requestEIO_uncommonBits = xbar__requestEIO_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
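// Beat-count decode: ~(27'hFFF << size) yields (2**size - 1) bytes, and bits [11:3] of
// that give the beat count minus one on the 8-byte data bus; the opdata checks on the
// opcode gate the count to zero for messages that carry no data payload.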
wire [26:0] xbar__beatsAI_decode_T = 27'hFFF << xbar_in_0_a_bits_size; // @[package.scala:243:71]
wire [11:0] xbar__beatsAI_decode_T_1 = xbar__beatsAI_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] xbar__beatsAI_decode_T_2 = ~xbar__beatsAI_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] xbar_beatsAI_decode = xbar__beatsAI_decode_T_2[11:3]; // @[package.scala:243:46]
wire xbar__beatsAI_opdata_T = xbar_in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18]
wire xbar_beatsAI_opdata = ~xbar__beatsAI_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] xbar_beatsAI_0 = xbar_beatsAI_opdata ? xbar_beatsAI_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
wire [26:0] xbar__beatsBO_decode_T = 27'hFFF << xbar_out_0_b_bits_size; // @[package.scala:243:71]
wire [11:0] xbar__beatsBO_decode_T_1 = xbar__beatsBO_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] xbar__beatsBO_decode_T_2 = ~xbar__beatsBO_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] xbar_beatsBO_decode = xbar__beatsBO_decode_T_2[11:3]; // @[package.scala:243:46]
wire xbar__beatsBO_opdata_T = xbar_out_0_b_bits_opcode[2]; // @[Xbar.scala:216:19]
wire xbar_beatsBO_opdata = ~xbar__beatsBO_opdata_T; // @[Edges.scala:97:{28,37}]
wire [26:0] xbar__beatsCI_decode_T = 27'hFFF << xbar_in_0_c_bits_size; // @[package.scala:243:71]
wire [11:0] xbar__beatsCI_decode_T_1 = xbar__beatsCI_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] xbar__beatsCI_decode_T_2 = ~xbar__beatsCI_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] xbar_beatsCI_decode = xbar__beatsCI_decode_T_2[11:3]; // @[package.scala:243:46]
wire xbar_beatsCI_opdata = xbar_in_0_c_bits_opcode[0]; // @[Xbar.scala:159:18]
wire [8:0] xbar_beatsCI_0 = xbar_beatsCI_opdata ? xbar_beatsCI_decode : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
wire [26:0] xbar__beatsDO_decode_T = 27'hFFF << xbar_out_0_d_bits_size; // @[package.scala:243:71]
wire [11:0] xbar__beatsDO_decode_T_1 = xbar__beatsDO_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] xbar__beatsDO_decode_T_2 = ~xbar__beatsDO_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] xbar_beatsDO_decode = xbar__beatsDO_decode_T_2[11:3]; // @[package.scala:243:46]
wire xbar_beatsDO_opdata = xbar_out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19]
wire [8:0] xbar_beatsDO_0 = xbar_beatsDO_opdata ? xbar_beatsDO_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
assign xbar_in_0_a_ready = xbar_portsAOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign xbar_out_0_a_valid = xbar_portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_opcode = xbar_portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_param = xbar_portsAOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_size = xbar_portsAOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_source = xbar_portsAOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_address = xbar_portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_mask = xbar_portsAOI_filtered_0_bits_mask; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_data = xbar_portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsAOI_filtered_0_valid = xbar__portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign xbar_out_0_b_ready = xbar_portsBIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24]
assign xbar_in_0_b_valid = xbar_portsBIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_b_bits_opcode = xbar_portsBIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_b_bits_param = xbar_portsBIO_filtered_0_bits_param; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_b_bits_size = xbar_portsBIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_b_bits_source = xbar_portsBIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_b_bits_address = xbar_portsBIO_filtered_0_bits_address; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_b_bits_mask = xbar_portsBIO_filtered_0_bits_mask; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_b_bits_data = xbar_portsBIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_b_bits_corrupt = xbar_portsBIO_filtered_0_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
assign xbar_portsBIO_filtered_0_valid = xbar__portsBIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign xbar_in_0_c_ready = xbar_portsCOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign xbar_out_0_c_valid = xbar_portsCOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_c_bits_opcode = xbar_portsCOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_c_bits_param = xbar_portsCOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_c_bits_size = xbar_portsCOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_c_bits_source = xbar_portsCOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_c_bits_address = xbar_portsCOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_c_bits_data = xbar_portsCOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsCOI_filtered_0_valid = xbar__portsCOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign xbar_out_0_d_ready = xbar_portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24]
assign xbar_in_0_d_valid = xbar_portsDIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_opcode = xbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_param = xbar_portsDIO_filtered_0_bits_param; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_size = xbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_source = xbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_sink = xbar_portsDIO_filtered_0_bits_sink; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_denied = xbar_portsDIO_filtered_0_bits_denied; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_data = xbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_corrupt = xbar_portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
assign xbar_portsDIO_filtered_0_valid = xbar__portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign xbar_in_0_e_ready = xbar_portsEOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign xbar_out_0_e_valid = xbar_portsEOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_e_bits_sink = xbar_portsEOI_filtered_0_bits_sink; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsEOI_filtered_0_valid = xbar__portsEOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
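  // TLWidthWidget port wiring: the inner (anonIn) and outer (anonOut) sides are both 64 bits wide
  // here, so the widget reduces to straight pass-through assignments on all five channels.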
wire widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_a_valid = widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_opcode = widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_param = widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_a_bits_size = widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_source = widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_a_bits_address = widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_anonIn_a_bits_mask = widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonIn_a_bits_data = widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonIn_b_ready = widget_auto_anon_in_b_ready; // @[WidthWidget.scala:27:9]
wire widget_anonIn_b_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_b_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonIn_b_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_b_bits_size; // @[MixedNode.scala:551:17]
wire widget_anonIn_b_bits_source; // @[MixedNode.scala:551:17]
wire [31:0] widget_anonIn_b_bits_address; // @[MixedNode.scala:551:17]
wire [7:0] widget_anonIn_b_bits_mask; // @[MixedNode.scala:551:17]
wire [63:0] widget_anonIn_b_bits_data; // @[MixedNode.scala:551:17]
wire widget_anonIn_b_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_anonIn_c_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_c_valid = widget_auto_anon_in_c_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_c_bits_opcode = widget_auto_anon_in_c_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_c_bits_param = widget_auto_anon_in_c_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_c_bits_size = widget_auto_anon_in_c_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonIn_c_bits_source = widget_auto_anon_in_c_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_c_bits_address = widget_auto_anon_in_c_bits_address; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonIn_c_bits_data = widget_auto_anon_in_c_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_ready = widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_anonIn_e_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_e_valid = widget_auto_anon_in_e_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_e_bits_sink = widget_auto_anon_in_e_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_ready = widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_a_valid = widget_auto_anon_out_a_valid; // @[Xbar.scala:74:9]
wire [2:0] widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_a_bits_opcode = widget_auto_anon_out_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_a_bits_param = widget_auto_anon_out_a_bits_param; // @[Xbar.scala:74:9]
wire [3:0] widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_a_bits_size = widget_auto_anon_out_a_bits_size; // @[Xbar.scala:74:9]
wire widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_a_bits_source = widget_auto_anon_out_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_a_bits_address = widget_auto_anon_out_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_a_bits_mask = widget_auto_anon_out_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_a_bits_data = widget_auto_anon_out_a_bits_data; // @[Xbar.scala:74:9]
wire widget_anonOut_b_ready; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_b_ready = widget_auto_anon_out_b_ready; // @[Xbar.scala:74:9]
wire widget_anonOut_b_valid = widget_auto_anon_out_b_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_b_bits_opcode = widget_auto_anon_out_b_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_anonOut_b_bits_param = widget_auto_anon_out_b_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_b_bits_size = widget_auto_anon_out_b_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonOut_b_bits_source = widget_auto_anon_out_b_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonOut_b_bits_address = widget_auto_anon_out_b_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_anonOut_b_bits_mask = widget_auto_anon_out_b_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonOut_b_bits_data = widget_auto_anon_out_b_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonOut_b_bits_corrupt = widget_auto_anon_out_b_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_anonOut_c_ready = widget_auto_anon_out_c_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_c_valid; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_c_valid = widget_auto_anon_out_c_valid; // @[Xbar.scala:74:9]
wire [2:0] widget_anonOut_c_bits_opcode; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_c_bits_opcode = widget_auto_anon_out_c_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] widget_anonOut_c_bits_param; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_c_bits_param = widget_auto_anon_out_c_bits_param; // @[Xbar.scala:74:9]
wire [3:0] widget_anonOut_c_bits_size; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_c_bits_size = widget_auto_anon_out_c_bits_size; // @[Xbar.scala:74:9]
wire widget_anonOut_c_bits_source; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_c_bits_source = widget_auto_anon_out_c_bits_source; // @[Xbar.scala:74:9]
wire [31:0] widget_anonOut_c_bits_address; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_c_bits_address = widget_auto_anon_out_c_bits_address; // @[Xbar.scala:74:9]
wire [63:0] widget_anonOut_c_bits_data; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_c_bits_data = widget_auto_anon_out_c_bits_data; // @[Xbar.scala:74:9]
wire widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_d_ready = widget_auto_anon_out_d_ready; // @[Xbar.scala:74:9]
wire widget_anonOut_d_valid = widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_d_bits_opcode = widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_anonOut_d_bits_param = widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_d_bits_size = widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_source = widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_d_bits_sink = widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_denied = widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonOut_d_bits_data = widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_corrupt = widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_anonOut_e_ready = widget_auto_anon_out_e_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_e_valid; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_e_valid = widget_auto_anon_out_e_valid; // @[Xbar.scala:74:9]
wire [2:0] widget_anonOut_e_bits_sink; // @[MixedNode.scala:542:17]
assign xbar_auto_anon_in_e_bits_sink = widget_auto_anon_out_e_bits_sink; // @[Xbar.scala:74:9]
wire widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_b_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_in_b_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_b_bits_size; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_b_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_b_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_in_b_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_b_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_b_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_b_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_c_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_e_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_a_ready = widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_a_valid = widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_opcode = widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_param = widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_size = widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_source = widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_address = widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_mask = widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_data = widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_b_ready = widget_anonOut_b_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_b_valid = widget_anonOut_b_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_opcode = widget_anonOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_param = widget_anonOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_size = widget_anonOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_source = widget_anonOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_address = widget_anonOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_mask = widget_anonOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_data = widget_anonOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_corrupt = widget_anonOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_c_ready = widget_anonOut_c_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_c_valid = widget_anonOut_c_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_opcode = widget_anonOut_c_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_param = widget_anonOut_c_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_size = widget_anonOut_c_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_source = widget_anonOut_c_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_address = widget_anonOut_c_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_data = widget_anonOut_c_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_ready = widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_d_valid = widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_opcode = widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_param = widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_size = widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_source = widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_sink = widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_denied = widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_data = widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_corrupt = widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_e_ready = widget_anonOut_e_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_e_valid = widget_anonOut_e_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_e_bits_sink = widget_anonOut_e_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_a_ready = widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_a_valid = widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_opcode = widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_param = widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_size = widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_source = widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_address = widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_mask = widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_data = widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_b_ready = widget_anonIn_b_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_b_valid = widget_anonIn_b_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_opcode = widget_anonIn_b_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_param = widget_anonIn_b_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_size = widget_anonIn_b_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_source = widget_anonIn_b_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_address = widget_anonIn_b_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_mask = widget_anonIn_b_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_data = widget_anonIn_b_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_corrupt = widget_anonIn_b_bits_corrupt; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_c_ready = widget_anonIn_c_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_c_valid = widget_anonIn_c_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_opcode = widget_anonIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_param = widget_anonIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_size = widget_anonIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_source = widget_anonIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_address = widget_anonIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_data = widget_anonIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_d_ready = widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_d_valid = widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_opcode = widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_param = widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_size = widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_source = widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_sink = widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_denied = widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_data = widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_corrupt = widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_e_ready = widget_anonIn_e_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_e_valid = widget_anonIn_e_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_e_bits_sink = widget_anonIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17]
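  // ReRoCC node glue: forward requests from the external re_ro_cc port into the manager-side node
  // and return the node's responses back out.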
assign reRoCCNodeIn_req_ready = reRoCCNodeOut_req_ready; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeIn_resp_valid = reRoCCNodeOut_resp_valid; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeIn_resp_bits_opcode = reRoCCNodeOut_resp_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeIn_resp_bits_client_id = reRoCCNodeOut_resp_bits_client_id; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeIn_resp_bits_manager_id = reRoCCNodeOut_resp_bits_manager_id; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] reRoCCNodeOut_req_bits_opcode; // @[MixedNode.scala:542:17]
wire [3:0] reRoCCNodeOut_req_bits_client_id; // @[MixedNode.scala:542:17]
wire reRoCCNodeOut_req_bits_manager_id; // @[MixedNode.scala:542:17]
wire [63:0] reRoCCNodeOut_req_bits_data; // @[MixedNode.scala:542:17]
assign reRoCCNodeIn_resp_bits_data = reRoCCNodeOut_resp_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire reRoCCNodeOut_req_valid; // @[MixedNode.scala:542:17]
wire reRoCCNodeOut_resp_ready; // @[MixedNode.scala:542:17]
assign auto_re_ro_cc_in_req_ready_0 = reRoCCNodeIn_req_ready; // @[Manager.scala:237:34]
assign reRoCCNodeOut_req_valid = reRoCCNodeIn_req_valid; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_req_bits_opcode = reRoCCNodeIn_req_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_req_bits_client_id = reRoCCNodeIn_req_bits_client_id; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_req_bits_manager_id = reRoCCNodeIn_req_bits_manager_id; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_req_bits_data = reRoCCNodeIn_req_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_resp_ready = reRoCCNodeIn_resp_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_re_ro_cc_in_resp_valid_0 = reRoCCNodeIn_resp_valid; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_opcode_0 = reRoCCNodeIn_resp_bits_opcode; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_client_id_0 = reRoCCNodeIn_resp_bits_client_id; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_manager_id_0 = reRoCCNodeIn_resp_bits_manager_id; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_data_0 = reRoCCNodeIn_resp_bits_data; // @[Manager.scala:237:34]
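  // Accumulator busy tracking: when a memory request fires, the addressed entry is marked busy;
  // the matching memory response (tag compare against memRespTag) clears it.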
wire accumulator__T_3 = accumulator_io_mem_req_ready & accumulator_io_mem_req_valid; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Manager.scala:237:34]
if (reset) begin // @[Manager.scala:237:34]
accumulator_busy_0 <= 1'h0; // @[LazyRoCC.scala:125:21]
accumulator_busy_1 <= 1'h0; // @[LazyRoCC.scala:125:21]
accumulator_busy_2 <= 1'h0; // @[LazyRoCC.scala:125:21]
accumulator_busy_3 <= 1'h0; // @[LazyRoCC.scala:125:21]
end
else begin // @[Manager.scala:237:34]
accumulator_busy_0 <= accumulator__T_3 & accumulator_addr == 2'h0 | ~(accumulator_io_mem_resp_valid & accumulator_memRespTag == 2'h0) & accumulator_busy_0; // @[Decoupled.scala:51:35]
accumulator_busy_1 <= accumulator__T_3 & accumulator_addr == 2'h1 | ~(accumulator_io_mem_resp_valid & accumulator_memRespTag == 2'h1) & accumulator_busy_1; // @[Decoupled.scala:51:35]
accumulator_busy_2 <= accumulator__T_3 & accumulator_addr == 2'h2 | ~(accumulator_io_mem_resp_valid & accumulator_memRespTag == 2'h2) & accumulator_busy_2; // @[Decoupled.scala:51:35]
accumulator_busy_3 <= accumulator__T_3 & (&accumulator_addr) | ~(accumulator_io_mem_resp_valid & (&accumulator_memRespTag)) & accumulator_busy_3; // @[Decoupled.scala:51:35]
end
  end // always @(posedge)
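  // 4-entry x 64-bit accumulator register file: R0 reads the selected operand, W0 writes returning
  // memory-load data, W1 writes command results (register write or accumulate).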
regfile_4x64 regfile_ext ( // @[LazyRoCC.scala:124:20]
.R0_addr (accumulator_addr), // @[LazyRoCC.scala:129:26]
.R0_en (1'h1), // @[Manager.scala:237:34]
.R0_clk (clock),
.R0_data (_regfile_ext_R0_data),
.W0_addr (accumulator_memRespTag), // @[LazyRoCC.scala:134:40]
.W0_en (accumulator_io_mem_resp_valid), // @[LazyRoCC.scala:122:7]
.W0_clk (clock),
.W0_data (accumulator_io_mem_resp_bits_data), // @[LazyRoCC.scala:122:7]
.W1_addr (accumulator_addr), // @[LazyRoCC.scala:129:26]
.W1_en (accumulator__q_io_deq_ready_T_4 & _accumulator_cmd_q_io_deq_valid & (accumulator_doWrite | accumulator_doAccum)), // @[Decoupled.scala:51:35, :362:21]
.W1_clk (clock),
.W1_data (accumulator_wdata) // @[LazyRoCC.scala:139:18]
); // @[LazyRoCC.scala:124:20]
assign accumulator_io_resp_bits_data = _regfile_ext_R0_data; // @[LazyRoCC.scala:122:7, :124:20]
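  // Two-entry queue buffering incoming RoCC commands before the accumulator datapath consumes them.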
Queue2_RoCCCommand_3 accumulator_cmd_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (accumulator_io_cmd_ready),
.io_enq_valid (accumulator_io_cmd_valid), // @[LazyRoCC.scala:122:7]
.io_enq_bits_inst_funct (accumulator_io_cmd_bits_inst_funct), // @[LazyRoCC.scala:122:7]
.io_enq_bits_inst_rs2 (accumulator_io_cmd_bits_inst_rs2), // @[LazyRoCC.scala:122:7]
.io_enq_bits_inst_rs1 (accumulator_io_cmd_bits_inst_rs1), // @[LazyRoCC.scala:122:7]
.io_enq_bits_inst_xd (accumulator_io_cmd_bits_inst_xd), // @[LazyRoCC.scala:122:7]
.io_enq_bits_inst_xs1 (accumulator_io_cmd_bits_inst_xs1), // @[LazyRoCC.scala:122:7]
.io_enq_bits_inst_xs2 (accumulator_io_cmd_bits_inst_xs2), // @[LazyRoCC.scala:122:7]
.io_enq_bits_inst_rd (accumulator_io_cmd_bits_inst_rd), // @[LazyRoCC.scala:122:7]
.io_enq_bits_inst_opcode (accumulator_io_cmd_bits_inst_opcode), // @[LazyRoCC.scala:122:7]
.io_enq_bits_rs1 (accumulator_io_cmd_bits_rs1), // @[LazyRoCC.scala:122:7]
.io_enq_bits_rs2 (accumulator_io_cmd_bits_rs2), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_debug (accumulator_io_cmd_bits_status_debug), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_cease (accumulator_io_cmd_bits_status_cease), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_wfi (accumulator_io_cmd_bits_status_wfi), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_isa (accumulator_io_cmd_bits_status_isa), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_dprv (accumulator_io_cmd_bits_status_dprv), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_dv (accumulator_io_cmd_bits_status_dv), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_prv (accumulator_io_cmd_bits_status_prv), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_v (accumulator_io_cmd_bits_status_v), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_sd (accumulator_io_cmd_bits_status_sd), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_zero2 (accumulator_io_cmd_bits_status_zero2), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_mpv (accumulator_io_cmd_bits_status_mpv), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_gva (accumulator_io_cmd_bits_status_gva), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_mbe (accumulator_io_cmd_bits_status_mbe), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_sbe (accumulator_io_cmd_bits_status_sbe), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_sxl (accumulator_io_cmd_bits_status_sxl), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_uxl (accumulator_io_cmd_bits_status_uxl), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_sd_rv32 (accumulator_io_cmd_bits_status_sd_rv32), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_zero1 (accumulator_io_cmd_bits_status_zero1), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_tsr (accumulator_io_cmd_bits_status_tsr), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_tw (accumulator_io_cmd_bits_status_tw), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_tvm (accumulator_io_cmd_bits_status_tvm), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_mxr (accumulator_io_cmd_bits_status_mxr), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_sum (accumulator_io_cmd_bits_status_sum), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_mprv (accumulator_io_cmd_bits_status_mprv), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_xs (accumulator_io_cmd_bits_status_xs), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_fs (accumulator_io_cmd_bits_status_fs), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_mpp (accumulator_io_cmd_bits_status_mpp), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_vs (accumulator_io_cmd_bits_status_vs), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_spp (accumulator_io_cmd_bits_status_spp), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_mpie (accumulator_io_cmd_bits_status_mpie), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_ube (accumulator_io_cmd_bits_status_ube), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_spie (accumulator_io_cmd_bits_status_spie), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_upie (accumulator_io_cmd_bits_status_upie), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_mie (accumulator_io_cmd_bits_status_mie), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_hie (accumulator_io_cmd_bits_status_hie), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_sie (accumulator_io_cmd_bits_status_sie), // @[LazyRoCC.scala:122:7]
.io_enq_bits_status_uie (accumulator_io_cmd_bits_status_uie), // @[LazyRoCC.scala:122:7]
.io_deq_ready (accumulator__q_io_deq_ready_T_4), // @[LazyRoCC.scala:160:40]
.io_deq_valid (_accumulator_cmd_q_io_deq_valid),
.io_deq_bits_inst_funct (_accumulator_cmd_q_io_deq_bits_inst_funct),
.io_deq_bits_inst_xd (_accumulator_cmd_q_io_deq_bits_inst_xd),
.io_deq_bits_inst_rd (accumulator_io_resp_bits_rd),
.io_deq_bits_rs1 (_accumulator_cmd_q_io_deq_bits_rs1),
.io_deq_bits_rs2 (_accumulator_cmd_q_io_deq_bits_rs2),
.io_deq_bits_status_dprv (accumulator_io_mem_req_bits_dprv),
.io_deq_bits_status_dv (accumulator_io_mem_req_bits_dv)
); // @[Decoupled.scala:362:21]
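  // ReRoCC manager: turns buffered ReRoCC requests into RoCC commands for the accumulator, returns
  // its responses, and exposes PTW/status state for the attached page-table walker.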
ReRoCCManager_2 rerocc_manager ( // @[Manager.scala:209:34]
.clock (clock),
.reset (reset),
.auto_in_req_ready (_rerocc_manager_auto_in_req_ready),
.auto_in_req_valid (_rerocc_buffer_auto_out_req_valid), // @[Protocol.scala:134:35]
.auto_in_req_bits_opcode (_rerocc_buffer_auto_out_req_bits_opcode), // @[Protocol.scala:134:35]
.auto_in_req_bits_client_id (_rerocc_buffer_auto_out_req_bits_client_id), // @[Protocol.scala:134:35]
.auto_in_req_bits_manager_id (_rerocc_buffer_auto_out_req_bits_manager_id), // @[Protocol.scala:134:35]
.auto_in_req_bits_data (_rerocc_buffer_auto_out_req_bits_data), // @[Protocol.scala:134:35]
.auto_in_resp_ready (_rerocc_buffer_auto_out_resp_ready), // @[Protocol.scala:134:35]
.auto_in_resp_valid (_rerocc_manager_auto_in_resp_valid),
.auto_in_resp_bits_opcode (_rerocc_manager_auto_in_resp_bits_opcode),
.auto_in_resp_bits_client_id (_rerocc_manager_auto_in_resp_bits_client_id),
.auto_in_resp_bits_manager_id (_rerocc_manager_auto_in_resp_bits_manager_id),
.auto_in_resp_bits_data (_rerocc_manager_auto_in_resp_bits_data),
.io_manager_id (reroccManagerIdSinkNodeIn[2:0]), // @[Manager.scala:262:41]
.io_cmd_ready (accumulator_io_cmd_ready), // @[LazyRoCC.scala:122:7]
.io_cmd_valid (accumulator_io_cmd_valid),
.io_cmd_bits_inst_funct (accumulator_io_cmd_bits_inst_funct),
.io_cmd_bits_inst_rs2 (accumulator_io_cmd_bits_inst_rs2),
.io_cmd_bits_inst_rs1 (accumulator_io_cmd_bits_inst_rs1),
.io_cmd_bits_inst_xd (accumulator_io_cmd_bits_inst_xd),
.io_cmd_bits_inst_xs1 (accumulator_io_cmd_bits_inst_xs1),
.io_cmd_bits_inst_xs2 (accumulator_io_cmd_bits_inst_xs2),
.io_cmd_bits_inst_rd (accumulator_io_cmd_bits_inst_rd),
.io_cmd_bits_inst_opcode (accumulator_io_cmd_bits_inst_opcode),
.io_cmd_bits_rs1 (accumulator_io_cmd_bits_rs1),
.io_cmd_bits_rs2 (accumulator_io_cmd_bits_rs2),
.io_cmd_bits_status_debug (accumulator_io_cmd_bits_status_debug),
.io_cmd_bits_status_cease (accumulator_io_cmd_bits_status_cease),
.io_cmd_bits_status_wfi (accumulator_io_cmd_bits_status_wfi),
.io_cmd_bits_status_isa (accumulator_io_cmd_bits_status_isa),
.io_cmd_bits_status_dprv (accumulator_io_cmd_bits_status_dprv),
.io_cmd_bits_status_dv (accumulator_io_cmd_bits_status_dv),
.io_cmd_bits_status_prv (accumulator_io_cmd_bits_status_prv),
.io_cmd_bits_status_v (accumulator_io_cmd_bits_status_v),
.io_cmd_bits_status_sd (accumulator_io_cmd_bits_status_sd),
.io_cmd_bits_status_zero2 (accumulator_io_cmd_bits_status_zero2),
.io_cmd_bits_status_mpv (accumulator_io_cmd_bits_status_mpv),
.io_cmd_bits_status_gva (accumulator_io_cmd_bits_status_gva),
.io_cmd_bits_status_mbe (accumulator_io_cmd_bits_status_mbe),
.io_cmd_bits_status_sbe (accumulator_io_cmd_bits_status_sbe),
.io_cmd_bits_status_sxl (accumulator_io_cmd_bits_status_sxl),
.io_cmd_bits_status_uxl (accumulator_io_cmd_bits_status_uxl),
.io_cmd_bits_status_sd_rv32 (accumulator_io_cmd_bits_status_sd_rv32),
.io_cmd_bits_status_zero1 (accumulator_io_cmd_bits_status_zero1),
.io_cmd_bits_status_tsr (accumulator_io_cmd_bits_status_tsr),
.io_cmd_bits_status_tw (accumulator_io_cmd_bits_status_tw),
.io_cmd_bits_status_tvm (accumulator_io_cmd_bits_status_tvm),
.io_cmd_bits_status_mxr (accumulator_io_cmd_bits_status_mxr),
.io_cmd_bits_status_sum (accumulator_io_cmd_bits_status_sum),
.io_cmd_bits_status_mprv (accumulator_io_cmd_bits_status_mprv),
.io_cmd_bits_status_xs (accumulator_io_cmd_bits_status_xs),
.io_cmd_bits_status_fs (accumulator_io_cmd_bits_status_fs),
.io_cmd_bits_status_mpp (accumulator_io_cmd_bits_status_mpp),
.io_cmd_bits_status_vs (accumulator_io_cmd_bits_status_vs),
.io_cmd_bits_status_spp (accumulator_io_cmd_bits_status_spp),
.io_cmd_bits_status_mpie (accumulator_io_cmd_bits_status_mpie),
.io_cmd_bits_status_ube (accumulator_io_cmd_bits_status_ube),
.io_cmd_bits_status_spie (accumulator_io_cmd_bits_status_spie),
.io_cmd_bits_status_upie (accumulator_io_cmd_bits_status_upie),
.io_cmd_bits_status_mie (accumulator_io_cmd_bits_status_mie),
.io_cmd_bits_status_hie (accumulator_io_cmd_bits_status_hie),
.io_cmd_bits_status_sie (accumulator_io_cmd_bits_status_sie),
.io_cmd_bits_status_uie (accumulator_io_cmd_bits_status_uie),
.io_resp_ready (accumulator_io_resp_ready),
.io_resp_valid (accumulator_io_resp_valid), // @[LazyRoCC.scala:122:7]
.io_resp_bits_rd (accumulator_io_resp_bits_rd), // @[LazyRoCC.scala:122:7]
.io_resp_bits_data (accumulator_io_resp_bits_data), // @[LazyRoCC.scala:122:7]
.io_busy (accumulator_io_busy), // @[LazyRoCC.scala:122:7]
.io_ptw_ptbr_mode (_rerocc_manager_io_ptw_ptbr_mode),
.io_ptw_ptbr_asid (_rerocc_manager_io_ptw_ptbr_asid),
.io_ptw_ptbr_ppn (_rerocc_manager_io_ptw_ptbr_ppn),
.io_ptw_sfence_valid (_rerocc_manager_io_ptw_sfence_valid),
.io_ptw_status_debug (_rerocc_manager_io_ptw_status_debug),
.io_ptw_status_cease (_rerocc_manager_io_ptw_status_cease),
.io_ptw_status_wfi (_rerocc_manager_io_ptw_status_wfi),
.io_ptw_status_isa (_rerocc_manager_io_ptw_status_isa),
.io_ptw_status_dprv (_rerocc_manager_io_ptw_status_dprv),
.io_ptw_status_dv (_rerocc_manager_io_ptw_status_dv),
.io_ptw_status_prv (_rerocc_manager_io_ptw_status_prv),
.io_ptw_status_v (_rerocc_manager_io_ptw_status_v),
.io_ptw_status_sd (_rerocc_manager_io_ptw_status_sd),
.io_ptw_status_zero2 (_rerocc_manager_io_ptw_status_zero2),
.io_ptw_status_mpv (_rerocc_manager_io_ptw_status_mpv),
.io_ptw_status_gva (_rerocc_manager_io_ptw_status_gva),
.io_ptw_status_mbe (_rerocc_manager_io_ptw_status_mbe),
.io_ptw_status_sbe (_rerocc_manager_io_ptw_status_sbe),
.io_ptw_status_sxl (_rerocc_manager_io_ptw_status_sxl),
.io_ptw_status_uxl (_rerocc_manager_io_ptw_status_uxl),
.io_ptw_status_sd_rv32 (_rerocc_manager_io_ptw_status_sd_rv32),
.io_ptw_status_zero1 (_rerocc_manager_io_ptw_status_zero1),
.io_ptw_status_tsr (_rerocc_manager_io_ptw_status_tsr),
.io_ptw_status_tw (_rerocc_manager_io_ptw_status_tw),
.io_ptw_status_tvm (_rerocc_manager_io_ptw_status_tvm),
.io_ptw_status_mxr (_rerocc_manager_io_ptw_status_mxr),
.io_ptw_status_sum (_rerocc_manager_io_ptw_status_sum),
.io_ptw_status_mprv (_rerocc_manager_io_ptw_status_mprv),
.io_ptw_status_xs (_rerocc_manager_io_ptw_status_xs),
.io_ptw_status_fs (_rerocc_manager_io_ptw_status_fs),
.io_ptw_status_mpp (_rerocc_manager_io_ptw_status_mpp),
.io_ptw_status_vs (_rerocc_manager_io_ptw_status_vs),
.io_ptw_status_spp (_rerocc_manager_io_ptw_status_spp),
.io_ptw_status_mpie (_rerocc_manager_io_ptw_status_mpie),
.io_ptw_status_ube (_rerocc_manager_io_ptw_status_ube),
.io_ptw_status_spie (_rerocc_manager_io_ptw_status_spie),
.io_ptw_status_upie (_rerocc_manager_io_ptw_status_upie),
.io_ptw_status_mie (_rerocc_manager_io_ptw_status_mie),
.io_ptw_status_hie (_rerocc_manager_io_ptw_status_hie),
.io_ptw_status_sie (_rerocc_manager_io_ptw_status_sie),
.io_ptw_status_uie (_rerocc_manager_io_ptw_status_uie),
.io_ptw_perf_pte_miss (_ptw_io_dpath_perf_pte_miss), // @[Manager.scala:243:21]
.io_ptw_clock_enabled (_ptw_io_dpath_clock_enabled) // @[Manager.scala:243:21]
); // @[Manager.scala:209:34]
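  // Buffer stage on the ReRoCC request/response channels between the external node and the manager.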
ReRoCCBuffer_3 rerocc_buffer ( // @[Protocol.scala:134:35]
.clock (clock),
.reset (reset),
.auto_in_req_ready (reRoCCNodeOut_req_ready),
.auto_in_req_valid (reRoCCNodeOut_req_valid), // @[MixedNode.scala:542:17]
.auto_in_req_bits_opcode (reRoCCNodeOut_req_bits_opcode), // @[MixedNode.scala:542:17]
.auto_in_req_bits_client_id (reRoCCNodeOut_req_bits_client_id), // @[MixedNode.scala:542:17]
.auto_in_req_bits_manager_id (reRoCCNodeOut_req_bits_manager_id), // @[MixedNode.scala:542:17]
.auto_in_req_bits_data (reRoCCNodeOut_req_bits_data), // @[MixedNode.scala:542:17]
.auto_in_resp_ready (reRoCCNodeOut_resp_ready), // @[MixedNode.scala:542:17]
.auto_in_resp_valid (reRoCCNodeOut_resp_valid),
.auto_in_resp_bits_opcode (reRoCCNodeOut_resp_bits_opcode),
.auto_in_resp_bits_client_id (reRoCCNodeOut_resp_bits_client_id),
.auto_in_resp_bits_manager_id (reRoCCNodeOut_resp_bits_manager_id),
.auto_in_resp_bits_data (reRoCCNodeOut_resp_bits_data),
.auto_out_req_ready (_rerocc_manager_auto_in_req_ready), // @[Manager.scala:209:34]
.auto_out_req_valid (_rerocc_buffer_auto_out_req_valid),
.auto_out_req_bits_opcode (_rerocc_buffer_auto_out_req_bits_opcode),
.auto_out_req_bits_client_id (_rerocc_buffer_auto_out_req_bits_client_id),
.auto_out_req_bits_manager_id (_rerocc_buffer_auto_out_req_bits_manager_id),
.auto_out_req_bits_data (_rerocc_buffer_auto_out_req_bits_data),
.auto_out_resp_ready (_rerocc_buffer_auto_out_resp_ready),
.auto_out_resp_valid (_rerocc_manager_auto_in_resp_valid), // @[Manager.scala:209:34]
.auto_out_resp_bits_opcode (_rerocc_manager_auto_in_resp_bits_opcode), // @[Manager.scala:209:34]
.auto_out_resp_bits_client_id (_rerocc_manager_auto_in_resp_bits_client_id), // @[Manager.scala:209:34]
.auto_out_resp_bits_manager_id (_rerocc_manager_auto_in_resp_bits_manager_id), // @[Manager.scala:209:34]
.auto_out_resp_bits_data (_rerocc_manager_auto_in_resp_bits_data) // @[Manager.scala:209:34]
); // @[Protocol.scala:134:35]
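  // TileLink buffer between the crossbar's outward port and this module's buffer_out interface.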
TLBuffer_a32d64s1k3z4c_6 buffer ( // @[Buffer.scala:75:28]
.clock (clock),
.reset (reset),
.auto_in_a_ready (xbar_auto_anon_out_a_ready),
.auto_in_a_valid (xbar_auto_anon_out_a_valid), // @[Xbar.scala:74:9]
.auto_in_a_bits_opcode (xbar_auto_anon_out_a_bits_opcode), // @[Xbar.scala:74:9]
.auto_in_a_bits_param (xbar_auto_anon_out_a_bits_param), // @[Xbar.scala:74:9]
.auto_in_a_bits_size (xbar_auto_anon_out_a_bits_size), // @[Xbar.scala:74:9]
.auto_in_a_bits_source (xbar_auto_anon_out_a_bits_source), // @[Xbar.scala:74:9]
.auto_in_a_bits_address (xbar_auto_anon_out_a_bits_address), // @[Xbar.scala:74:9]
.auto_in_a_bits_mask (xbar_auto_anon_out_a_bits_mask), // @[Xbar.scala:74:9]
.auto_in_a_bits_data (xbar_auto_anon_out_a_bits_data), // @[Xbar.scala:74:9]
.auto_in_b_ready (xbar_auto_anon_out_b_ready), // @[Xbar.scala:74:9]
.auto_in_b_valid (xbar_auto_anon_out_b_valid),
.auto_in_b_bits_opcode (xbar_auto_anon_out_b_bits_opcode),
.auto_in_b_bits_param (xbar_auto_anon_out_b_bits_param),
.auto_in_b_bits_size (xbar_auto_anon_out_b_bits_size),
.auto_in_b_bits_source (xbar_auto_anon_out_b_bits_source),
.auto_in_b_bits_address (xbar_auto_anon_out_b_bits_address),
.auto_in_b_bits_mask (xbar_auto_anon_out_b_bits_mask),
.auto_in_b_bits_data (xbar_auto_anon_out_b_bits_data),
.auto_in_b_bits_corrupt (xbar_auto_anon_out_b_bits_corrupt),
.auto_in_c_ready (xbar_auto_anon_out_c_ready),
.auto_in_c_valid (xbar_auto_anon_out_c_valid), // @[Xbar.scala:74:9]
.auto_in_c_bits_opcode (xbar_auto_anon_out_c_bits_opcode), // @[Xbar.scala:74:9]
.auto_in_c_bits_param (xbar_auto_anon_out_c_bits_param), // @[Xbar.scala:74:9]
.auto_in_c_bits_size (xbar_auto_anon_out_c_bits_size), // @[Xbar.scala:74:9]
.auto_in_c_bits_source (xbar_auto_anon_out_c_bits_source), // @[Xbar.scala:74:9]
.auto_in_c_bits_address (xbar_auto_anon_out_c_bits_address), // @[Xbar.scala:74:9]
.auto_in_c_bits_data (xbar_auto_anon_out_c_bits_data), // @[Xbar.scala:74:9]
.auto_in_d_ready (xbar_auto_anon_out_d_ready), // @[Xbar.scala:74:9]
.auto_in_d_valid (xbar_auto_anon_out_d_valid),
.auto_in_d_bits_opcode (xbar_auto_anon_out_d_bits_opcode),
.auto_in_d_bits_param (xbar_auto_anon_out_d_bits_param),
.auto_in_d_bits_size (xbar_auto_anon_out_d_bits_size),
.auto_in_d_bits_source (xbar_auto_anon_out_d_bits_source),
.auto_in_d_bits_sink (xbar_auto_anon_out_d_bits_sink),
.auto_in_d_bits_denied (xbar_auto_anon_out_d_bits_denied),
.auto_in_d_bits_data (xbar_auto_anon_out_d_bits_data),
.auto_in_d_bits_corrupt (xbar_auto_anon_out_d_bits_corrupt),
.auto_in_e_ready (xbar_auto_anon_out_e_ready),
.auto_in_e_valid (xbar_auto_anon_out_e_valid), // @[Xbar.scala:74:9]
.auto_in_e_bits_sink (xbar_auto_anon_out_e_bits_sink), // @[Xbar.scala:74:9]
.auto_out_a_ready (auto_buffer_out_a_ready_0), // @[Manager.scala:237:34]
.auto_out_a_valid (auto_buffer_out_a_valid_0),
.auto_out_a_bits_opcode (auto_buffer_out_a_bits_opcode_0),
.auto_out_a_bits_param (auto_buffer_out_a_bits_param_0),
.auto_out_a_bits_size (auto_buffer_out_a_bits_size_0),
.auto_out_a_bits_source (auto_buffer_out_a_bits_source_0),
.auto_out_a_bits_address (auto_buffer_out_a_bits_address_0),
.auto_out_a_bits_mask (auto_buffer_out_a_bits_mask_0),
.auto_out_a_bits_data (auto_buffer_out_a_bits_data_0),
.auto_out_a_bits_corrupt (auto_buffer_out_a_bits_corrupt_0),
.auto_out_b_ready (auto_buffer_out_b_ready_0),
.auto_out_b_valid (auto_buffer_out_b_valid_0), // @[Manager.scala:237:34]
.auto_out_b_bits_opcode (auto_buffer_out_b_bits_opcode_0), // @[Manager.scala:237:34]
.auto_out_b_bits_param (auto_buffer_out_b_bits_param_0), // @[Manager.scala:237:34]
.auto_out_b_bits_size (auto_buffer_out_b_bits_size_0), // @[Manager.scala:237:34]
.auto_out_b_bits_source (auto_buffer_out_b_bits_source_0), // @[Manager.scala:237:34]
.auto_out_b_bits_address (auto_buffer_out_b_bits_address_0), // @[Manager.scala:237:34]
.auto_out_b_bits_mask (auto_buffer_out_b_bits_mask_0), // @[Manager.scala:237:34]
.auto_out_b_bits_data (auto_buffer_out_b_bits_data_0), // @[Manager.scala:237:34]
.auto_out_b_bits_corrupt (auto_buffer_out_b_bits_corrupt_0), // @[Manager.scala:237:34]
.auto_out_c_ready (auto_buffer_out_c_ready_0), // @[Manager.scala:237:34]
.auto_out_c_valid (auto_buffer_out_c_valid_0),
.auto_out_c_bits_opcode (auto_buffer_out_c_bits_opcode_0),
.auto_out_c_bits_param (auto_buffer_out_c_bits_param_0),
.auto_out_c_bits_size (auto_buffer_out_c_bits_size_0),
.auto_out_c_bits_source (auto_buffer_out_c_bits_source_0),
.auto_out_c_bits_address (auto_buffer_out_c_bits_address_0),
.auto_out_c_bits_data (auto_buffer_out_c_bits_data_0),
.auto_out_c_bits_corrupt (auto_buffer_out_c_bits_corrupt_0),
.auto_out_d_ready (auto_buffer_out_d_ready_0),
.auto_out_d_valid (auto_buffer_out_d_valid_0), // @[Manager.scala:237:34]
.auto_out_d_bits_opcode (auto_buffer_out_d_bits_opcode_0), // @[Manager.scala:237:34]
.auto_out_d_bits_param (auto_buffer_out_d_bits_param_0), // @[Manager.scala:237:34]
.auto_out_d_bits_size (auto_buffer_out_d_bits_size_0), // @[Manager.scala:237:34]
.auto_out_d_bits_source (auto_buffer_out_d_bits_source_0), // @[Manager.scala:237:34]
.auto_out_d_bits_sink (auto_buffer_out_d_bits_sink_0), // @[Manager.scala:237:34]
.auto_out_d_bits_denied (auto_buffer_out_d_bits_denied_0), // @[Manager.scala:237:34]
.auto_out_d_bits_data (auto_buffer_out_d_bits_data_0), // @[Manager.scala:237:34]
.auto_out_d_bits_corrupt (auto_buffer_out_d_bits_corrupt_0), // @[Manager.scala:237:34]
.auto_out_e_ready (auto_buffer_out_e_ready_0), // @[Manager.scala:237:34]
.auto_out_e_valid (auto_buffer_out_e_valid_0),
.auto_out_e_bits_sink (auto_buffer_out_e_bits_sink_0)
); // @[Buffer.scala:75:28]
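  // Private data cache: its TileLink master side feeds the width widget, the CPU side is driven by
  // the HellaCache arbiter, and PTW traffic uses the walker's requestor 0 port.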
MiniDCache_2 dcache ( // @[Manager.scala:226:61]
.clock (clock),
.reset (reset),
.auto_out_a_ready (widget_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9]
.auto_out_a_valid (widget_auto_anon_in_a_valid),
.auto_out_a_bits_opcode (widget_auto_anon_in_a_bits_opcode),
.auto_out_a_bits_param (widget_auto_anon_in_a_bits_param),
.auto_out_a_bits_size (widget_auto_anon_in_a_bits_size),
.auto_out_a_bits_source (widget_auto_anon_in_a_bits_source),
.auto_out_a_bits_address (widget_auto_anon_in_a_bits_address),
.auto_out_a_bits_mask (widget_auto_anon_in_a_bits_mask),
.auto_out_a_bits_data (widget_auto_anon_in_a_bits_data),
.auto_out_b_ready (widget_auto_anon_in_b_ready),
.auto_out_b_valid (widget_auto_anon_in_b_valid), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_opcode (widget_auto_anon_in_b_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_param (widget_auto_anon_in_b_bits_param), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_size (widget_auto_anon_in_b_bits_size), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_source (widget_auto_anon_in_b_bits_source), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_address (widget_auto_anon_in_b_bits_address), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_mask (widget_auto_anon_in_b_bits_mask), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_data (widget_auto_anon_in_b_bits_data), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_corrupt (widget_auto_anon_in_b_bits_corrupt), // @[WidthWidget.scala:27:9]
.auto_out_c_ready (widget_auto_anon_in_c_ready), // @[WidthWidget.scala:27:9]
.auto_out_c_valid (widget_auto_anon_in_c_valid),
.auto_out_c_bits_opcode (widget_auto_anon_in_c_bits_opcode),
.auto_out_c_bits_param (widget_auto_anon_in_c_bits_param),
.auto_out_c_bits_size (widget_auto_anon_in_c_bits_size),
.auto_out_c_bits_source (widget_auto_anon_in_c_bits_source),
.auto_out_c_bits_address (widget_auto_anon_in_c_bits_address),
.auto_out_c_bits_data (widget_auto_anon_in_c_bits_data),
.auto_out_d_ready (widget_auto_anon_in_d_ready),
.auto_out_d_valid (widget_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_opcode (widget_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_param (widget_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_size (widget_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_source (widget_auto_anon_in_d_bits_source), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_sink (widget_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_denied (widget_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_data (widget_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_corrupt (widget_auto_anon_in_d_bits_corrupt), // @[WidthWidget.scala:27:9]
.auto_out_e_ready (widget_auto_anon_in_e_ready), // @[WidthWidget.scala:27:9]
.auto_out_e_valid (widget_auto_anon_in_e_valid),
.auto_out_e_bits_sink (widget_auto_anon_in_e_bits_sink),
.io_cpu_req_ready (_dcache_io_cpu_req_ready),
.io_cpu_req_valid (_dcacheArb_io_mem_req_valid), // @[Manager.scala:238:27]
.io_cpu_req_bits_addr (_dcacheArb_io_mem_req_bits_addr), // @[Manager.scala:238:27]
.io_cpu_req_bits_tag (_dcacheArb_io_mem_req_bits_tag), // @[Manager.scala:238:27]
.io_cpu_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv), // @[Manager.scala:238:27]
.io_cpu_req_bits_dv (_dcacheArb_io_mem_req_bits_dv), // @[Manager.scala:238:27]
.io_cpu_req_bits_phys (_dcacheArb_io_mem_req_bits_phys), // @[Manager.scala:238:27]
.io_cpu_s1_kill (_dcacheArb_io_mem_s1_kill), // @[Manager.scala:238:27]
.io_cpu_s1_data_data (_dcacheArb_io_mem_s1_data_data), // @[Manager.scala:238:27]
.io_cpu_s1_data_mask (_dcacheArb_io_mem_s1_data_mask), // @[Manager.scala:238:27]
.io_cpu_s2_nack (_dcache_io_cpu_s2_nack),
.io_cpu_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw),
.io_cpu_s2_uncached (_dcache_io_cpu_s2_uncached),
.io_cpu_s2_paddr (_dcache_io_cpu_s2_paddr),
.io_cpu_resp_valid (_dcache_io_cpu_resp_valid),
.io_cpu_resp_bits_addr (_dcache_io_cpu_resp_bits_addr),
.io_cpu_resp_bits_tag (_dcache_io_cpu_resp_bits_tag),
.io_cpu_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd),
.io_cpu_resp_bits_size (_dcache_io_cpu_resp_bits_size),
.io_cpu_resp_bits_signed (_dcache_io_cpu_resp_bits_signed),
.io_cpu_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv),
.io_cpu_resp_bits_dv (_dcache_io_cpu_resp_bits_dv),
.io_cpu_resp_bits_data (_dcache_io_cpu_resp_bits_data),
.io_cpu_resp_bits_mask (_dcache_io_cpu_resp_bits_mask),
.io_cpu_resp_bits_replay (_dcache_io_cpu_resp_bits_replay),
.io_cpu_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data),
.io_cpu_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass),
.io_cpu_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw),
.io_cpu_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data),
.io_cpu_replay_next (_dcache_io_cpu_replay_next),
.io_cpu_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld),
.io_cpu_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st),
.io_cpu_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld),
.io_cpu_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st),
.io_cpu_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld),
.io_cpu_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st),
.io_cpu_s2_gpa (_dcache_io_cpu_s2_gpa),
.io_cpu_ordered (_dcache_io_cpu_ordered),
.io_cpu_store_pending (_dcache_io_cpu_store_pending),
.io_cpu_perf_acquire (_dcache_io_cpu_perf_acquire),
.io_cpu_perf_release (_dcache_io_cpu_perf_release),
.io_cpu_perf_grant (_dcache_io_cpu_perf_grant),
.io_cpu_perf_tlbMiss (_dcache_io_cpu_perf_tlbMiss),
.io_cpu_perf_blocked (_dcache_io_cpu_perf_blocked),
.io_cpu_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad),
.io_cpu_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW),
.io_cpu_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad),
.io_cpu_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad),
.io_cpu_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore),
.io_ptw_req_ready (_ptw_io_requestor_0_req_ready), // @[Manager.scala:243:21]
.io_ptw_req_valid (_dcache_io_ptw_req_valid),
.io_ptw_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr),
.io_ptw_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa),
.io_ptw_resp_valid (_ptw_io_requestor_0_resp_valid), // @[Manager.scala:243:21]
.io_ptw_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw), // @[Manager.scala:243:21]
.io_ptw_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf), // @[Manager.scala:243:21]
.io_ptw_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf), // @[Manager.scala:243:21]
.io_ptw_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr), // @[Manager.scala:243:21]
.io_ptw_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw), // @[Manager.scala:243:21]
.io_ptw_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v), // @[Manager.scala:243:21]
.io_ptw_resp_bits_level (_ptw_io_requestor_0_resp_bits_level), // @[Manager.scala:243:21]
.io_ptw_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous), // @[Manager.scala:243:21]
.io_ptw_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid), // @[Manager.scala:243:21]
.io_ptw_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits), // @[Manager.scala:243:21]
.io_ptw_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte), // @[Manager.scala:243:21]
.io_ptw_ptbr_mode (_ptw_io_requestor_0_ptbr_mode), // @[Manager.scala:243:21]
.io_ptw_ptbr_asid (_ptw_io_requestor_0_ptbr_asid), // @[Manager.scala:243:21]
.io_ptw_ptbr_ppn (_ptw_io_requestor_0_ptbr_ppn), // @[Manager.scala:243:21]
.io_ptw_status_debug (_ptw_io_requestor_0_status_debug), // @[Manager.scala:243:21]
.io_ptw_status_cease (_ptw_io_requestor_0_status_cease), // @[Manager.scala:243:21]
.io_ptw_status_wfi (_ptw_io_requestor_0_status_wfi), // @[Manager.scala:243:21]
.io_ptw_status_isa (_ptw_io_requestor_0_status_isa), // @[Manager.scala:243:21]
.io_ptw_status_dprv (_ptw_io_requestor_0_status_dprv), // @[Manager.scala:243:21]
.io_ptw_status_dv (_ptw_io_requestor_0_status_dv), // @[Manager.scala:243:21]
.io_ptw_status_prv (_ptw_io_requestor_0_status_prv), // @[Manager.scala:243:21]
.io_ptw_status_v (_ptw_io_requestor_0_status_v), // @[Manager.scala:243:21]
.io_ptw_status_sd (_ptw_io_requestor_0_status_sd), // @[Manager.scala:243:21]
.io_ptw_status_zero2 (_ptw_io_requestor_0_status_zero2), // @[Manager.scala:243:21]
.io_ptw_status_mpv (_ptw_io_requestor_0_status_mpv), // @[Manager.scala:243:21]
.io_ptw_status_gva (_ptw_io_requestor_0_status_gva), // @[Manager.scala:243:21]
.io_ptw_status_mbe (_ptw_io_requestor_0_status_mbe), // @[Manager.scala:243:21]
.io_ptw_status_sbe (_ptw_io_requestor_0_status_sbe), // @[Manager.scala:243:21]
.io_ptw_status_sxl (_ptw_io_requestor_0_status_sxl), // @[Manager.scala:243:21]
.io_ptw_status_uxl (_ptw_io_requestor_0_status_uxl), // @[Manager.scala:243:21]
.io_ptw_status_sd_rv32 (_ptw_io_requestor_0_status_sd_rv32), // @[Manager.scala:243:21]
.io_ptw_status_zero1 (_ptw_io_requestor_0_status_zero1), // @[Manager.scala:243:21]
.io_ptw_status_tsr (_ptw_io_requestor_0_status_tsr), // @[Manager.scala:243:21]
.io_ptw_status_tw (_ptw_io_requestor_0_status_tw), // @[Manager.scala:243:21]
.io_ptw_status_tvm (_ptw_io_requestor_0_status_tvm), // @[Manager.scala:243:21]
.io_ptw_status_mxr (_ptw_io_requestor_0_status_mxr), // @[Manager.scala:243:21]
.io_ptw_status_sum (_ptw_io_requestor_0_status_sum), // @[Manager.scala:243:21]
.io_ptw_status_mprv (_ptw_io_requestor_0_status_mprv), // @[Manager.scala:243:21]
.io_ptw_status_xs (_ptw_io_requestor_0_status_xs), // @[Manager.scala:243:21]
.io_ptw_status_fs (_ptw_io_requestor_0_status_fs), // @[Manager.scala:243:21]
.io_ptw_status_mpp (_ptw_io_requestor_0_status_mpp), // @[Manager.scala:243:21]
.io_ptw_status_vs (_ptw_io_requestor_0_status_vs), // @[Manager.scala:243:21]
.io_ptw_status_spp (_ptw_io_requestor_0_status_spp), // @[Manager.scala:243:21]
.io_ptw_status_mpie (_ptw_io_requestor_0_status_mpie), // @[Manager.scala:243:21]
.io_ptw_status_ube (_ptw_io_requestor_0_status_ube), // @[Manager.scala:243:21]
.io_ptw_status_spie (_ptw_io_requestor_0_status_spie), // @[Manager.scala:243:21]
.io_ptw_status_upie (_ptw_io_requestor_0_status_upie), // @[Manager.scala:243:21]
.io_ptw_status_mie (_ptw_io_requestor_0_status_mie), // @[Manager.scala:243:21]
.io_ptw_status_hie (_ptw_io_requestor_0_status_hie), // @[Manager.scala:243:21]
.io_ptw_status_sie (_ptw_io_requestor_0_status_sie), // @[Manager.scala:243:21]
.io_ptw_status_uie (_ptw_io_requestor_0_status_uie) // @[Manager.scala:243:21]
); // @[Manager.scala:226:61]
ReRoCCManagerControl_2 ctrl ( // @[Manager.scala:235:24]
.clock (clock),
.reset (reset),
.auto_ctrl_in_a_ready (auto_ctrl_ctrl_in_a_ready_0),
.auto_ctrl_in_a_valid (auto_ctrl_ctrl_in_a_valid_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_opcode (auto_ctrl_ctrl_in_a_bits_opcode_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_param (auto_ctrl_ctrl_in_a_bits_param_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_size (auto_ctrl_ctrl_in_a_bits_size_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_source (auto_ctrl_ctrl_in_a_bits_source_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_address (auto_ctrl_ctrl_in_a_bits_address_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_mask (auto_ctrl_ctrl_in_a_bits_mask_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_data (auto_ctrl_ctrl_in_a_bits_data_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_corrupt (auto_ctrl_ctrl_in_a_bits_corrupt_0), // @[Manager.scala:237:34]
.auto_ctrl_in_d_ready (auto_ctrl_ctrl_in_d_ready_0), // @[Manager.scala:237:34]
.auto_ctrl_in_d_valid (auto_ctrl_ctrl_in_d_valid_0),
.auto_ctrl_in_d_bits_opcode (auto_ctrl_ctrl_in_d_bits_opcode_0),
.auto_ctrl_in_d_bits_size (auto_ctrl_ctrl_in_d_bits_size_0),
.auto_ctrl_in_d_bits_source (auto_ctrl_ctrl_in_d_bits_source_0),
.auto_ctrl_in_d_bits_data (auto_ctrl_ctrl_in_d_bits_data_0),
.io_mgr_busy (accumulator_io_busy), // @[LazyRoCC.scala:122:7]
.io_rocc_busy (accumulator_io_busy) // @[LazyRoCC.scala:122:7]
); // @[Manager.scala:235:24]
HellaCacheArbiter_3 dcacheArb ( // @[Manager.scala:238:27]
.clock (clock),
.reset (reset),
.io_requestor_0_req_ready (_dcacheArb_io_requestor_0_req_ready),
.io_requestor_0_req_valid (_ptw_io_mem_req_valid), // @[Manager.scala:243:21]
.io_requestor_0_req_bits_addr (_ptw_io_mem_req_bits_addr), // @[Manager.scala:243:21]
.io_requestor_0_req_bits_dv (_ptw_io_mem_req_bits_dv), // @[Manager.scala:243:21]
.io_requestor_0_s1_kill (_ptw_io_mem_s1_kill), // @[Manager.scala:243:21]
.io_requestor_0_s2_nack (_dcacheArb_io_requestor_0_s2_nack),
.io_requestor_0_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw),
.io_requestor_0_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached),
.io_requestor_0_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr),
.io_requestor_0_resp_valid (_dcacheArb_io_requestor_0_resp_valid),
.io_requestor_0_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr),
.io_requestor_0_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag),
.io_requestor_0_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd),
.io_requestor_0_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size),
.io_requestor_0_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed),
.io_requestor_0_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv),
.io_requestor_0_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv),
.io_requestor_0_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data),
.io_requestor_0_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask),
.io_requestor_0_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay),
.io_requestor_0_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data),
.io_requestor_0_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass),
.io_requestor_0_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw),
.io_requestor_0_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data),
.io_requestor_0_replay_next (_dcacheArb_io_requestor_0_replay_next),
.io_requestor_0_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld),
.io_requestor_0_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st),
.io_requestor_0_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld),
.io_requestor_0_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st),
.io_requestor_0_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld),
.io_requestor_0_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st),
.io_requestor_0_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa),
.io_requestor_0_ordered (_dcacheArb_io_requestor_0_ordered),
.io_requestor_0_store_pending (_dcacheArb_io_requestor_0_store_pending),
.io_requestor_0_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire),
.io_requestor_0_perf_release (_dcacheArb_io_requestor_0_perf_release),
.io_requestor_0_perf_grant (_dcacheArb_io_requestor_0_perf_grant),
.io_requestor_0_perf_tlbMiss (_dcacheArb_io_requestor_0_perf_tlbMiss),
.io_requestor_0_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked),
.io_requestor_0_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad),
.io_requestor_0_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW),
.io_requestor_0_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad),
.io_requestor_0_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad),
.io_requestor_0_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore),
.io_requestor_1_req_ready (_dcacheArb_io_requestor_1_req_ready),
.io_requestor_1_req_valid (_dcIF_io_cache_req_valid), // @[Manager.scala:255:22]
.io_requestor_1_req_bits_addr (_dcIF_io_cache_req_bits_addr), // @[Manager.scala:255:22]
.io_requestor_1_req_bits_tag (_dcIF_io_cache_req_bits_tag), // @[Manager.scala:255:22]
.io_requestor_1_req_bits_dprv (_dcIF_io_cache_req_bits_dprv), // @[Manager.scala:255:22]
.io_requestor_1_req_bits_dv (_dcIF_io_cache_req_bits_dv), // @[Manager.scala:255:22]
.io_requestor_1_s1_data_data (_dcIF_io_cache_s1_data_data), // @[Manager.scala:255:22]
.io_requestor_1_s1_data_mask (_dcIF_io_cache_s1_data_mask), // @[Manager.scala:255:22]
.io_requestor_1_s2_nack (_dcacheArb_io_requestor_1_s2_nack),
.io_requestor_1_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw),
.io_requestor_1_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached),
.io_requestor_1_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr),
.io_requestor_1_resp_valid (_dcacheArb_io_requestor_1_resp_valid),
.io_requestor_1_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr),
.io_requestor_1_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag),
.io_requestor_1_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd),
.io_requestor_1_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size),
.io_requestor_1_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed),
.io_requestor_1_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv),
.io_requestor_1_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv),
.io_requestor_1_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data),
.io_requestor_1_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask),
.io_requestor_1_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay),
.io_requestor_1_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data),
.io_requestor_1_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass),
.io_requestor_1_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw),
.io_requestor_1_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data),
.io_requestor_1_replay_next (_dcacheArb_io_requestor_1_replay_next),
.io_requestor_1_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld),
.io_requestor_1_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st),
.io_requestor_1_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld),
.io_requestor_1_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st),
.io_requestor_1_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld),
.io_requestor_1_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st),
.io_requestor_1_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa),
.io_requestor_1_ordered (_dcacheArb_io_requestor_1_ordered),
.io_requestor_1_store_pending (_dcacheArb_io_requestor_1_store_pending),
.io_requestor_1_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire),
.io_requestor_1_perf_release (_dcacheArb_io_requestor_1_perf_release),
.io_requestor_1_perf_grant (_dcacheArb_io_requestor_1_perf_grant),
.io_requestor_1_perf_tlbMiss (_dcacheArb_io_requestor_1_perf_tlbMiss),
.io_requestor_1_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked),
.io_requestor_1_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad),
.io_requestor_1_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW),
.io_requestor_1_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad),
.io_requestor_1_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad),
.io_requestor_1_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore),
.io_mem_req_ready (_dcache_io_cpu_req_ready), // @[Manager.scala:226:61]
.io_mem_req_valid (_dcacheArb_io_mem_req_valid),
.io_mem_req_bits_addr (_dcacheArb_io_mem_req_bits_addr),
.io_mem_req_bits_tag (_dcacheArb_io_mem_req_bits_tag),
.io_mem_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv),
.io_mem_req_bits_dv (_dcacheArb_io_mem_req_bits_dv),
.io_mem_req_bits_phys (_dcacheArb_io_mem_req_bits_phys),
.io_mem_s1_kill (_dcacheArb_io_mem_s1_kill),
.io_mem_s1_data_data (_dcacheArb_io_mem_s1_data_data),
.io_mem_s1_data_mask (_dcacheArb_io_mem_s1_data_mask),
.io_mem_s2_nack (_dcache_io_cpu_s2_nack), // @[Manager.scala:226:61]
.io_mem_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw), // @[Manager.scala:226:61]
.io_mem_s2_uncached (_dcache_io_cpu_s2_uncached), // @[Manager.scala:226:61]
.io_mem_s2_paddr (_dcache_io_cpu_s2_paddr), // @[Manager.scala:226:61]
.io_mem_resp_valid (_dcache_io_cpu_resp_valid), // @[Manager.scala:226:61]
.io_mem_resp_bits_addr (_dcache_io_cpu_resp_bits_addr), // @[Manager.scala:226:61]
.io_mem_resp_bits_tag (_dcache_io_cpu_resp_bits_tag), // @[Manager.scala:226:61]
.io_mem_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd), // @[Manager.scala:226:61]
.io_mem_resp_bits_size (_dcache_io_cpu_resp_bits_size), // @[Manager.scala:226:61]
.io_mem_resp_bits_signed (_dcache_io_cpu_resp_bits_signed), // @[Manager.scala:226:61]
.io_mem_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv), // @[Manager.scala:226:61]
.io_mem_resp_bits_dv (_dcache_io_cpu_resp_bits_dv), // @[Manager.scala:226:61]
.io_mem_resp_bits_data (_dcache_io_cpu_resp_bits_data), // @[Manager.scala:226:61]
.io_mem_resp_bits_mask (_dcache_io_cpu_resp_bits_mask), // @[Manager.scala:226:61]
.io_mem_resp_bits_replay (_dcache_io_cpu_resp_bits_replay), // @[Manager.scala:226:61]
.io_mem_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data), // @[Manager.scala:226:61]
.io_mem_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass), // @[Manager.scala:226:61]
.io_mem_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw), // @[Manager.scala:226:61]
.io_mem_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data), // @[Manager.scala:226:61]
.io_mem_replay_next (_dcache_io_cpu_replay_next), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st), // @[Manager.scala:226:61]
.io_mem_s2_gpa (_dcache_io_cpu_s2_gpa), // @[Manager.scala:226:61]
.io_mem_ordered (_dcache_io_cpu_ordered), // @[Manager.scala:226:61]
.io_mem_store_pending (_dcache_io_cpu_store_pending), // @[Manager.scala:226:61]
.io_mem_perf_acquire (_dcache_io_cpu_perf_acquire), // @[Manager.scala:226:61]
.io_mem_perf_release (_dcache_io_cpu_perf_release), // @[Manager.scala:226:61]
.io_mem_perf_grant (_dcache_io_cpu_perf_grant), // @[Manager.scala:226:61]
.io_mem_perf_tlbMiss (_dcache_io_cpu_perf_tlbMiss), // @[Manager.scala:226:61]
.io_mem_perf_blocked (_dcache_io_cpu_perf_blocked), // @[Manager.scala:226:61]
.io_mem_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad), // @[Manager.scala:226:61]
.io_mem_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW), // @[Manager.scala:226:61]
.io_mem_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad), // @[Manager.scala:226:61]
.io_mem_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad), // @[Manager.scala:226:61]
.io_mem_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore) // @[Manager.scala:226:61]
); // @[Manager.scala:238:27]
PTW_3 ptw ( // @[Manager.scala:243:21]
.clock (clock),
.reset (reset),
.io_requestor_0_req_ready (_ptw_io_requestor_0_req_ready),
.io_requestor_0_req_valid (_dcache_io_ptw_req_valid), // @[Manager.scala:226:61]
.io_requestor_0_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr), // @[Manager.scala:226:61]
.io_requestor_0_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa), // @[Manager.scala:226:61]
.io_requestor_0_resp_valid (_ptw_io_requestor_0_resp_valid),
.io_requestor_0_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw),
.io_requestor_0_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final),
.io_requestor_0_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf),
.io_requestor_0_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf),
.io_requestor_0_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr),
.io_requestor_0_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw),
.io_requestor_0_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx),
.io_requestor_0_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future),
.io_requestor_0_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn),
.io_requestor_0_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software),
.io_requestor_0_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d),
.io_requestor_0_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a),
.io_requestor_0_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g),
.io_requestor_0_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u),
.io_requestor_0_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x),
.io_requestor_0_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w),
.io_requestor_0_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r),
.io_requestor_0_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v),
.io_requestor_0_resp_bits_level (_ptw_io_requestor_0_resp_bits_level),
.io_requestor_0_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous),
.io_requestor_0_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid),
.io_requestor_0_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits),
.io_requestor_0_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte),
.io_requestor_0_ptbr_mode (_ptw_io_requestor_0_ptbr_mode),
.io_requestor_0_ptbr_asid (_ptw_io_requestor_0_ptbr_asid),
.io_requestor_0_ptbr_ppn (_ptw_io_requestor_0_ptbr_ppn),
.io_requestor_0_status_debug (_ptw_io_requestor_0_status_debug),
.io_requestor_0_status_cease (_ptw_io_requestor_0_status_cease),
.io_requestor_0_status_wfi (_ptw_io_requestor_0_status_wfi),
.io_requestor_0_status_isa (_ptw_io_requestor_0_status_isa),
.io_requestor_0_status_dprv (_ptw_io_requestor_0_status_dprv),
.io_requestor_0_status_dv (_ptw_io_requestor_0_status_dv),
.io_requestor_0_status_prv (_ptw_io_requestor_0_status_prv),
.io_requestor_0_status_v (_ptw_io_requestor_0_status_v),
.io_requestor_0_status_sd (_ptw_io_requestor_0_status_sd),
.io_requestor_0_status_zero2 (_ptw_io_requestor_0_status_zero2),
.io_requestor_0_status_mpv (_ptw_io_requestor_0_status_mpv),
.io_requestor_0_status_gva (_ptw_io_requestor_0_status_gva),
.io_requestor_0_status_mbe (_ptw_io_requestor_0_status_mbe),
.io_requestor_0_status_sbe (_ptw_io_requestor_0_status_sbe),
.io_requestor_0_status_sxl (_ptw_io_requestor_0_status_sxl),
.io_requestor_0_status_uxl (_ptw_io_requestor_0_status_uxl),
.io_requestor_0_status_sd_rv32 (_ptw_io_requestor_0_status_sd_rv32),
.io_requestor_0_status_zero1 (_ptw_io_requestor_0_status_zero1),
.io_requestor_0_status_tsr (_ptw_io_requestor_0_status_tsr),
.io_requestor_0_status_tw (_ptw_io_requestor_0_status_tw),
.io_requestor_0_status_tvm (_ptw_io_requestor_0_status_tvm),
.io_requestor_0_status_mxr (_ptw_io_requestor_0_status_mxr),
.io_requestor_0_status_sum (_ptw_io_requestor_0_status_sum),
.io_requestor_0_status_mprv (_ptw_io_requestor_0_status_mprv),
.io_requestor_0_status_xs (_ptw_io_requestor_0_status_xs),
.io_requestor_0_status_fs (_ptw_io_requestor_0_status_fs),
.io_requestor_0_status_mpp (_ptw_io_requestor_0_status_mpp),
.io_requestor_0_status_vs (_ptw_io_requestor_0_status_vs),
.io_requestor_0_status_spp (_ptw_io_requestor_0_status_spp),
.io_requestor_0_status_mpie (_ptw_io_requestor_0_status_mpie),
.io_requestor_0_status_ube (_ptw_io_requestor_0_status_ube),
.io_requestor_0_status_spie (_ptw_io_requestor_0_status_spie),
.io_requestor_0_status_upie (_ptw_io_requestor_0_status_upie),
.io_requestor_0_status_mie (_ptw_io_requestor_0_status_mie),
.io_requestor_0_status_hie (_ptw_io_requestor_0_status_hie),
.io_requestor_0_status_sie (_ptw_io_requestor_0_status_sie),
.io_requestor_0_status_uie (_ptw_io_requestor_0_status_uie),
.io_mem_req_ready (_dcacheArb_io_requestor_0_req_ready), // @[Manager.scala:238:27]
.io_mem_req_valid (_ptw_io_mem_req_valid),
.io_mem_req_bits_addr (_ptw_io_mem_req_bits_addr),
.io_mem_req_bits_dv (_ptw_io_mem_req_bits_dv),
.io_mem_s1_kill (_ptw_io_mem_s1_kill),
.io_mem_s2_nack (_dcacheArb_io_requestor_0_s2_nack), // @[Manager.scala:238:27]
.io_mem_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw), // @[Manager.scala:238:27]
.io_mem_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached), // @[Manager.scala:238:27]
.io_mem_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr), // @[Manager.scala:238:27]
.io_mem_resp_valid (_dcacheArb_io_requestor_0_resp_valid), // @[Manager.scala:238:27]
.io_mem_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr), // @[Manager.scala:238:27]
.io_mem_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag), // @[Manager.scala:238:27]
.io_mem_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd), // @[Manager.scala:238:27]
.io_mem_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size), // @[Manager.scala:238:27]
.io_mem_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed), // @[Manager.scala:238:27]
.io_mem_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv), // @[Manager.scala:238:27]
.io_mem_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv), // @[Manager.scala:238:27]
.io_mem_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data), // @[Manager.scala:238:27]
.io_mem_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask), // @[Manager.scala:238:27]
.io_mem_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay), // @[Manager.scala:238:27]
.io_mem_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data), // @[Manager.scala:238:27]
.io_mem_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass), // @[Manager.scala:238:27]
.io_mem_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw), // @[Manager.scala:238:27]
.io_mem_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data), // @[Manager.scala:238:27]
.io_mem_replay_next (_dcacheArb_io_requestor_0_replay_next), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st), // @[Manager.scala:238:27]
.io_mem_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa), // @[Manager.scala:238:27]
.io_mem_ordered (_dcacheArb_io_requestor_0_ordered), // @[Manager.scala:238:27]
.io_mem_store_pending (_dcacheArb_io_requestor_0_store_pending), // @[Manager.scala:238:27]
.io_mem_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire), // @[Manager.scala:238:27]
.io_mem_perf_release (_dcacheArb_io_requestor_0_perf_release), // @[Manager.scala:238:27]
.io_mem_perf_grant (_dcacheArb_io_requestor_0_perf_grant), // @[Manager.scala:238:27]
.io_mem_perf_tlbMiss (_dcacheArb_io_requestor_0_perf_tlbMiss), // @[Manager.scala:238:27]
.io_mem_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked), // @[Manager.scala:238:27]
.io_mem_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad), // @[Manager.scala:238:27]
.io_mem_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW), // @[Manager.scala:238:27]
.io_mem_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad), // @[Manager.scala:238:27]
.io_mem_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad), // @[Manager.scala:238:27]
.io_mem_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore), // @[Manager.scala:238:27]
.io_dpath_ptbr_mode (_rerocc_manager_io_ptw_ptbr_mode), // @[Manager.scala:209:34]
.io_dpath_ptbr_asid (_rerocc_manager_io_ptw_ptbr_asid), // @[Manager.scala:209:34]
.io_dpath_ptbr_ppn (_rerocc_manager_io_ptw_ptbr_ppn), // @[Manager.scala:209:34]
.io_dpath_sfence_valid (_rerocc_manager_io_ptw_sfence_valid), // @[Manager.scala:209:34]
.io_dpath_status_debug (_rerocc_manager_io_ptw_status_debug), // @[Manager.scala:209:34]
.io_dpath_status_cease (_rerocc_manager_io_ptw_status_cease), // @[Manager.scala:209:34]
.io_dpath_status_wfi (_rerocc_manager_io_ptw_status_wfi), // @[Manager.scala:209:34]
.io_dpath_status_isa (_rerocc_manager_io_ptw_status_isa), // @[Manager.scala:209:34]
.io_dpath_status_dprv (_rerocc_manager_io_ptw_status_dprv), // @[Manager.scala:209:34]
.io_dpath_status_dv (_rerocc_manager_io_ptw_status_dv), // @[Manager.scala:209:34]
.io_dpath_status_prv (_rerocc_manager_io_ptw_status_prv), // @[Manager.scala:209:34]
.io_dpath_status_v (_rerocc_manager_io_ptw_status_v), // @[Manager.scala:209:34]
.io_dpath_status_sd (_rerocc_manager_io_ptw_status_sd), // @[Manager.scala:209:34]
.io_dpath_status_zero2 (_rerocc_manager_io_ptw_status_zero2), // @[Manager.scala:209:34]
.io_dpath_status_mpv (_rerocc_manager_io_ptw_status_mpv), // @[Manager.scala:209:34]
.io_dpath_status_gva (_rerocc_manager_io_ptw_status_gva), // @[Manager.scala:209:34]
.io_dpath_status_mbe (_rerocc_manager_io_ptw_status_mbe), // @[Manager.scala:209:34]
.io_dpath_status_sbe (_rerocc_manager_io_ptw_status_sbe), // @[Manager.scala:209:34]
.io_dpath_status_sxl (_rerocc_manager_io_ptw_status_sxl), // @[Manager.scala:209:34]
.io_dpath_status_uxl (_rerocc_manager_io_ptw_status_uxl), // @[Manager.scala:209:34]
.io_dpath_status_sd_rv32 (_rerocc_manager_io_ptw_status_sd_rv32), // @[Manager.scala:209:34]
.io_dpath_status_zero1 (_rerocc_manager_io_ptw_status_zero1), // @[Manager.scala:209:34]
.io_dpath_status_tsr (_rerocc_manager_io_ptw_status_tsr), // @[Manager.scala:209:34]
.io_dpath_status_tw (_rerocc_manager_io_ptw_status_tw), // @[Manager.scala:209:34]
.io_dpath_status_tvm (_rerocc_manager_io_ptw_status_tvm), // @[Manager.scala:209:34]
.io_dpath_status_mxr (_rerocc_manager_io_ptw_status_mxr), // @[Manager.scala:209:34]
.io_dpath_status_sum (_rerocc_manager_io_ptw_status_sum), // @[Manager.scala:209:34]
.io_dpath_status_mprv (_rerocc_manager_io_ptw_status_mprv), // @[Manager.scala:209:34]
.io_dpath_status_xs (_rerocc_manager_io_ptw_status_xs), // @[Manager.scala:209:34]
.io_dpath_status_fs (_rerocc_manager_io_ptw_status_fs), // @[Manager.scala:209:34]
.io_dpath_status_mpp (_rerocc_manager_io_ptw_status_mpp), // @[Manager.scala:209:34]
.io_dpath_status_vs (_rerocc_manager_io_ptw_status_vs), // @[Manager.scala:209:34]
.io_dpath_status_spp (_rerocc_manager_io_ptw_status_spp), // @[Manager.scala:209:34]
.io_dpath_status_mpie (_rerocc_manager_io_ptw_status_mpie), // @[Manager.scala:209:34]
.io_dpath_status_ube (_rerocc_manager_io_ptw_status_ube), // @[Manager.scala:209:34]
.io_dpath_status_spie (_rerocc_manager_io_ptw_status_spie), // @[Manager.scala:209:34]
.io_dpath_status_upie (_rerocc_manager_io_ptw_status_upie), // @[Manager.scala:209:34]
.io_dpath_status_mie (_rerocc_manager_io_ptw_status_mie), // @[Manager.scala:209:34]
.io_dpath_status_hie (_rerocc_manager_io_ptw_status_hie), // @[Manager.scala:209:34]
.io_dpath_status_sie (_rerocc_manager_io_ptw_status_sie), // @[Manager.scala:209:34]
.io_dpath_status_uie (_rerocc_manager_io_ptw_status_uie), // @[Manager.scala:209:34]
.io_dpath_perf_pte_miss (_ptw_io_dpath_perf_pte_miss),
.io_dpath_clock_enabled (_ptw_io_dpath_clock_enabled)
); // @[Manager.scala:243:21]
SimpleHellaCacheIF_3 dcIF ( // @[Manager.scala:255:22]
.clock (clock),
.reset (reset),
.io_requestor_req_ready (accumulator_io_mem_req_ready),
.io_requestor_req_valid (accumulator_io_mem_req_valid), // @[LazyRoCC.scala:122:7]
.io_requestor_req_bits_addr (accumulator_io_mem_req_bits_addr), // @[LazyRoCC.scala:122:7]
.io_requestor_req_bits_tag (accumulator_io_mem_req_bits_tag), // @[LazyRoCC.scala:122:7]
.io_requestor_req_bits_dprv (accumulator_io_mem_req_bits_dprv), // @[LazyRoCC.scala:122:7]
.io_requestor_req_bits_dv (accumulator_io_mem_req_bits_dv), // @[LazyRoCC.scala:122:7]
.io_requestor_resp_valid (accumulator_io_mem_resp_valid),
.io_requestor_resp_bits_addr (accumulator_io_mem_resp_bits_addr),
.io_requestor_resp_bits_tag (accumulator_io_mem_resp_bits_tag),
.io_requestor_resp_bits_cmd (accumulator_io_mem_resp_bits_cmd),
.io_requestor_resp_bits_size (accumulator_io_mem_resp_bits_size),
.io_requestor_resp_bits_signed (accumulator_io_mem_resp_bits_signed),
.io_requestor_resp_bits_dprv (accumulator_io_mem_resp_bits_dprv),
.io_requestor_resp_bits_dv (accumulator_io_mem_resp_bits_dv),
.io_requestor_resp_bits_data (accumulator_io_mem_resp_bits_data),
.io_requestor_resp_bits_mask (accumulator_io_mem_resp_bits_mask),
.io_requestor_resp_bits_replay (accumulator_io_mem_resp_bits_replay),
.io_requestor_resp_bits_has_data (accumulator_io_mem_resp_bits_has_data),
.io_requestor_resp_bits_data_word_bypass (accumulator_io_mem_resp_bits_data_word_bypass),
.io_requestor_resp_bits_data_raw (accumulator_io_mem_resp_bits_data_raw),
.io_requestor_resp_bits_store_data (accumulator_io_mem_resp_bits_store_data),
.io_cache_req_ready (_dcacheArb_io_requestor_1_req_ready), // @[Manager.scala:238:27]
.io_cache_req_valid (_dcIF_io_cache_req_valid),
.io_cache_req_bits_addr (_dcIF_io_cache_req_bits_addr),
.io_cache_req_bits_tag (_dcIF_io_cache_req_bits_tag),
.io_cache_req_bits_dprv (_dcIF_io_cache_req_bits_dprv),
.io_cache_req_bits_dv (_dcIF_io_cache_req_bits_dv),
.io_cache_s1_data_data (_dcIF_io_cache_s1_data_data),
.io_cache_s1_data_mask (_dcIF_io_cache_s1_data_mask),
.io_cache_s2_nack (_dcacheArb_io_requestor_1_s2_nack), // @[Manager.scala:238:27]
.io_cache_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw), // @[Manager.scala:238:27]
.io_cache_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached), // @[Manager.scala:238:27]
.io_cache_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr), // @[Manager.scala:238:27]
.io_cache_resp_valid (_dcacheArb_io_requestor_1_resp_valid), // @[Manager.scala:238:27]
.io_cache_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr), // @[Manager.scala:238:27]
.io_cache_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag), // @[Manager.scala:238:27]
.io_cache_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd), // @[Manager.scala:238:27]
.io_cache_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size), // @[Manager.scala:238:27]
.io_cache_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed), // @[Manager.scala:238:27]
.io_cache_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv), // @[Manager.scala:238:27]
.io_cache_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv), // @[Manager.scala:238:27]
.io_cache_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data), // @[Manager.scala:238:27]
.io_cache_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask), // @[Manager.scala:238:27]
.io_cache_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay), // @[Manager.scala:238:27]
.io_cache_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data), // @[Manager.scala:238:27]
.io_cache_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass), // @[Manager.scala:238:27]
.io_cache_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw), // @[Manager.scala:238:27]
.io_cache_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data), // @[Manager.scala:238:27]
.io_cache_replay_next (_dcacheArb_io_requestor_1_replay_next), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st), // @[Manager.scala:238:27]
.io_cache_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa), // @[Manager.scala:238:27]
.io_cache_ordered (_dcacheArb_io_requestor_1_ordered), // @[Manager.scala:238:27]
.io_cache_store_pending (_dcacheArb_io_requestor_1_store_pending), // @[Manager.scala:238:27]
.io_cache_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire), // @[Manager.scala:238:27]
.io_cache_perf_release (_dcacheArb_io_requestor_1_perf_release), // @[Manager.scala:238:27]
.io_cache_perf_grant (_dcacheArb_io_requestor_1_perf_grant), // @[Manager.scala:238:27]
.io_cache_perf_tlbMiss (_dcacheArb_io_requestor_1_perf_tlbMiss), // @[Manager.scala:238:27]
.io_cache_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked), // @[Manager.scala:238:27]
.io_cache_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad), // @[Manager.scala:238:27]
.io_cache_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW), // @[Manager.scala:238:27]
.io_cache_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad), // @[Manager.scala:238:27]
.io_cache_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad), // @[Manager.scala:238:27]
.io_cache_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore) // @[Manager.scala:238:27]
); // @[Manager.scala:255:22]
assign auto_ctrl_ctrl_in_a_ready = auto_ctrl_ctrl_in_a_ready_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_valid = auto_ctrl_ctrl_in_d_valid_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_bits_opcode = auto_ctrl_ctrl_in_d_bits_opcode_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_bits_size = auto_ctrl_ctrl_in_d_bits_size_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_bits_source = auto_ctrl_ctrl_in_d_bits_source_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_bits_data = auto_ctrl_ctrl_in_d_bits_data_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_valid = auto_buffer_out_a_valid_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_opcode = auto_buffer_out_a_bits_opcode_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_param = auto_buffer_out_a_bits_param_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_size = auto_buffer_out_a_bits_size_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_source = auto_buffer_out_a_bits_source_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_address = auto_buffer_out_a_bits_address_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_mask = auto_buffer_out_a_bits_mask_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_data = auto_buffer_out_a_bits_data_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_corrupt = auto_buffer_out_a_bits_corrupt_0; // @[Manager.scala:237:34]
assign auto_buffer_out_b_ready = auto_buffer_out_b_ready_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_valid = auto_buffer_out_c_valid_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_opcode = auto_buffer_out_c_bits_opcode_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_param = auto_buffer_out_c_bits_param_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_size = auto_buffer_out_c_bits_size_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_source = auto_buffer_out_c_bits_source_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_address = auto_buffer_out_c_bits_address_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_data = auto_buffer_out_c_bits_data_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_corrupt = auto_buffer_out_c_bits_corrupt_0; // @[Manager.scala:237:34]
assign auto_buffer_out_d_ready = auto_buffer_out_d_ready_0; // @[Manager.scala:237:34]
assign auto_buffer_out_e_valid = auto_buffer_out_e_valid_0; // @[Manager.scala:237:34]
assign auto_buffer_out_e_bits_sink = auto_buffer_out_e_bits_sink_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_req_ready = auto_re_ro_cc_in_req_ready_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_valid = auto_re_ro_cc_in_resp_valid_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_opcode = auto_re_ro_cc_in_resp_bits_opcode_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_client_id = auto_re_ro_cc_in_resp_bits_client_id_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_manager_id = auto_re_ro_cc_in_resp_bits_manager_id_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_data = auto_re_ro_cc_in_resp_bits_data_0; // @[Manager.scala:237:34]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
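// Buffers incoming flits for each virtual channel. Depending on the channel parameters this is
// either a separate Queue per VC or a single shared memory partitioned into per-VC circular
// regions, optionally staged through single-entry output queues.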
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
    // If output queues are not used, each VC needs one extra slot, since head == tail means empty
    // TODO: this should be fixed so that all slots can be used
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
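    // Unified buffer: one shared memory holds all VCs, with per-VC head/tail pointers that wrap
    // within each VC's [starts(i), ends(i)) region. A single-entry pipe Queue per VC acts as an
    // output stage; when output queues are enabled, an arriving flit bypasses the memory if its
    // VC's region is empty and the output queue can accept it.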
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
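// One InputUnit serves each router input port: it buffers flits per virtual channel and walks
// every packet through route computation, VC allocation, and switch allocation before
// presenting flits to the crossbar.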
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
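  // Per-VC packet state: g_i = idle, g_r = awaiting route computation, g_v = awaiting VC
  // allocation, g_a = active (arbitrating for the switch). g_c is declared but unused here.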
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
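  // For flows marked FIFO, packets of the same flow must leave in arrival order. fifo_deps
  // records which other VCs currently hold the same flow; those bits are cleared here as the
  // corresponding VCs return to idle, and set below when a new head flit arrives.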
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
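  // On a head flit, the VC leaves idle and latches the packet's flow. If the packet has already
  // reached its egress node it skips route computation and goes straight to VC allocation
  // (selecting the matching egress); otherwise it first requests a route.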
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
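  // Rotating-priority arbitration for the VC-allocation request: the request vector is
  // duplicated, with the low copy masked at and below the last grant, so PriorityEncoderOH picks
  // the lowest-index requester above the previous winner and wraps around otherwise.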
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
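  // Switch allocation: an active VC may request the crossbar only when one of its granted output
  // VCs has a credit available and a buffered flit is ready; when the tail flit is granted the
  // VC returns to idle.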
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
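  // With combineSAST the switch-allocation grant drives the output in the same cycle; otherwise
  // the grant is registered and the flit leaves one cycle later.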
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
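  // Return a credit to the upstream channel for every flit that wins switch allocation, and
  // additionally signal vc_free when the departing flit is a tail, releasing the VC upstream.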
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
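  // Statically prune output-VC selections that the routing relation can never produce from this
  // input VC, so those vc_sel bits are driven false and can be constant-folded away.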
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_52( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [4:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [5:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [5:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_12, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_13, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_16, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_17, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_20, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_21, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_12, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_13, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_16, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_17, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_20, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_21, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_12, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_13, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_16, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_17, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_20, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_21, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_12, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_13, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_16, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_17, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_20, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_21, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_12, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_13, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_16, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_17, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_20, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_21, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_12, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_13, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_16, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_17, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_20, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_21, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_10, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_11, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_14, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_15, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_18, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_19, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_20, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_21, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_12, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_13, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_16, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_17, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_20, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_21, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_12, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_13, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_16, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_17, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_20, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_21, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_10, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_11, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_12, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_13, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_14, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_15, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_16, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_17, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_18, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_19, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_20, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_21, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_10, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_11, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_12, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_13, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_14, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_15, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_16, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_17, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_18, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_19, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_20, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_21, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_10, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_11, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_12, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_13, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_14, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_15, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_16, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_17, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_18, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_19, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_20, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_21, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [5:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [5:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [4:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [4:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [5:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [5:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [21:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [21:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire vcalloc_vals_21; // @[InputUnit.scala:266:32]
wire vcalloc_vals_20; // @[InputUnit.scala:266:32]
wire vcalloc_vals_17; // @[InputUnit.scala:266:32]
wire vcalloc_vals_16; // @[InputUnit.scala:266:32]
wire vcalloc_vals_13; // @[InputUnit.scala:266:32]
wire vcalloc_vals_12; // @[InputUnit.scala:266:32]
wire _salloc_arb_io_in_12_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_13_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_16_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_17_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_20_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_21_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [21:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_12_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_13_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_16_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_17_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_20_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_21_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [4:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_8_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_8_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_8_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_9_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_9_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_9_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_10_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_10_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_10_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_11_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_11_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_11_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_12_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_12_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_12_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_12_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_13_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_13_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_13_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_13_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_14_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_14_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_14_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_15_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_15_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_15_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_16_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_16_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_16_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_16_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_17_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_17_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_17_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_17_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_18_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_18_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_18_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_19_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_19_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_19_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_20_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_20_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_20_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_20_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_21_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_21_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_21_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_21_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_12_g; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_1_12; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_1_13; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_1_20; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_1_21; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_0_12; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_0_13; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_12_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_12_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_12_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_12_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_12_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_13_g; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_1_12; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_1_13; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_1_20; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_1_21; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_0_12; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_0_13; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_13_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_13_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_13_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_13_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_13_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_16_g; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_1_16; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_1_17; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_1_20; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_1_21; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_0_16; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_0_17; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_16_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_16_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_16_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_16_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_16_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_17_g; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_1_16; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_1_17; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_1_20; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_1_21; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_0_16; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_0_17; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_17_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_17_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_17_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_17_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_17_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_20_g; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_1_12; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_1_13; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_1_16; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_1_17; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_1_20; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_1_21; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_12; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_13; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_16; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_17; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_20_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_20_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_20_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_20_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_20_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_21_g; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_1_12; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_1_13; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_1_16; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_1_17; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_1_20; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_1_21; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_12; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_13; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_16; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_17; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_21_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_21_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_21_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_21_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_21_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_12_valid = states_12_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_13_valid = states_13_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_16_valid = states_16_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_17_valid = states_17_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_20_valid = states_20_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_21_valid = states_21_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
reg [21:0] mask; // @[InputUnit.scala:250:21]
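  // Editor's note (inferred from the structure below): the next three statements implement a
  // rotating-priority (round-robin) pick over the pending VC-allocation requests. Requests not
  // yet covered by `mask` are considered first (low half of the 44-bit one-hot); the unmasked
  // requests provide the wrap-around fallback (high half); the two halves are then OR-ed into a
  // single 22-bit one-hot grant. `mask` presumably records VCs up to and including the last grant.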
wire [21:0] _vcalloc_filter_T_3 = {vcalloc_vals_21, vcalloc_vals_20, 2'h0, vcalloc_vals_17, vcalloc_vals_16, 2'h0, vcalloc_vals_13, vcalloc_vals_12, 12'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32]
wire [43:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 44'h1 : _vcalloc_filter_T_3[1] ? 44'h2 : _vcalloc_filter_T_3[2] ? 44'h4 : _vcalloc_filter_T_3[3] ? 44'h8 : _vcalloc_filter_T_3[4] ? 44'h10 : _vcalloc_filter_T_3[5] ? 44'h20 : _vcalloc_filter_T_3[6] ? 44'h40 : _vcalloc_filter_T_3[7] ? 44'h80 : _vcalloc_filter_T_3[8] ? 44'h100 : _vcalloc_filter_T_3[9] ? 44'h200 : _vcalloc_filter_T_3[10] ? 44'h400 : _vcalloc_filter_T_3[11] ? 44'h800 : _vcalloc_filter_T_3[12] ? 44'h1000 : _vcalloc_filter_T_3[13] ? 44'h2000 : _vcalloc_filter_T_3[14] ? 44'h4000 : _vcalloc_filter_T_3[15] ? 44'h8000 : _vcalloc_filter_T_3[16] ? 44'h10000 : _vcalloc_filter_T_3[17] ? 44'h20000 : _vcalloc_filter_T_3[18] ? 44'h40000 : _vcalloc_filter_T_3[19] ? 44'h80000 : _vcalloc_filter_T_3[20] ? 44'h100000 : _vcalloc_filter_T_3[21] ? 44'h200000 : vcalloc_vals_12 ? 44'h400000000 : vcalloc_vals_13 ? 44'h800000000 : vcalloc_vals_16 ? 44'h4000000000 : vcalloc_vals_17 ? 44'h8000000000 : vcalloc_vals_20 ? 44'h40000000000 : {vcalloc_vals_21, 43'h0}; // @[OneHot.scala:85:71]
wire [21:0] vcalloc_sel = vcalloc_filter[21:0] | vcalloc_filter[43:22]; // @[Mux.scala:50:70]
wire io_vcalloc_req_valid_0 = vcalloc_vals_12 | vcalloc_vals_13 | vcalloc_vals_16 | vcalloc_vals_17 | vcalloc_vals_20 | vcalloc_vals_21; // @[package.scala:81:59]
assign vcalloc_vals_12 = states_12_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_13 = states_13_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_16 = states_16_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_17 = states_17_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_20 = states_20_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_21 = states_21_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
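  // Editor's note: states_*_g appears to track the per-VC state machine from the Chisel
  // InputUnit source; a value of 3'h1 requests route computation (route_arbiter valids above)
  // and 3'h2 requests VC allocation (vcalloc_vals here). Only VCs 12, 13, 16, 17, 20 and 21
  // are active in this configuration, which is why only those indices have state registers.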
wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_1 = _GEN_0 & vcalloc_sel[12]; // @[Mux.scala:32:36]
wire _GEN_2 = _GEN_0 & vcalloc_sel[13]; // @[Mux.scala:32:36]
wire _GEN_3 = _GEN_0 & vcalloc_sel[16]; // @[Mux.scala:32:36]
wire _GEN_4 = _GEN_0 & vcalloc_sel[17]; // @[Mux.scala:32:36]
wire _GEN_5 = _GEN_0 & vcalloc_sel[20]; // @[Mux.scala:32:36]
wire _GEN_6 = _GEN_0 & vcalloc_sel[21]; // @[Mux.scala:32:36] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
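  // Editor's note: when `enable` is true, apply below rebinds `node` through a TLEphemeralNode
  // named "monitor" inside EnableMonitors, which presumably causes diplomacy to materialize a
  // TLMonitor on that edge; when `enable` is false, the node is returned unchanged.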
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
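  // (Editor's note: in a pure simulation monitor, both monAssert and assume below reduce to a
  //  chisel3 assert; in a formal flow with a non-Monitor direction, monAssert emits a property
  //  in the monitor's own direction while assume emits one in the flipped direction, so,
  //  intuitively, checks on DUT-driven signals stay assertions and checks on DUT inputs become
  //  assumptions. See e.g. the use of monAssert in legalizeFormatA versus assume in legalizeFormatD.)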
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
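  // Editor's note: visible() declares an address/source pair legal only if every client that
  // owns that source ID lists the address inside its `visibility` address sets; clients that do
  // not own the source are skipped by the first disjunct (!c.sourceId.contains(source)).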
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
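    // Editor's note: sym_source models an arbitrary-but-fixed source ID. It is tied to 0 for
    // simulation (per the TODO above) and intended to be left free in formal; the two properties
    // below pin it to a stable, in-range value so the per-source tracking that follows reasons
    // about a single representative source.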
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
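    // Editor's note: the limit is read from a simulation plusarg; with rocket-chip's PlusArg
    // convention it is presumably set on the simulator command line, e.g. +tilelink_timeout=100000
    // (illustrative value), and leaving it at 0 disables the watchdog, as the assertion below shows.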
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
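    // Editor's worked example (illustrative numbers, not from the source): with sizeBits = 4,
    // a_size_bus_size = 5 and log_a_size_bus_size = 3, so each source owns an 8-bit slot in
    // inflight_sizes; a size-6 request is recorded there as (6 << 1) | 1 = 8'b0000_1101, and an
    // all-zero slot means "no request outstanding". size_to_numfullbits(n) simply builds the
    // n-bit all-ones mask later used to carve one slot out of the packed vector.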
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
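    // Editor's note: the two lookups above recover the opcode/size recorded for bundle.d.bits.source:
    // shift the packed register right by source * slot-width, mask off a single slot with
    // size_to_numfullbits, then drop the low "valid" bit with the final >> 1.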
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
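    // Editor's note: responseMap is indexed by the original A-channel opcode (PutFullData = 0 ...
    // AcquirePerm = 7 in the standard TileLink encoding); e.g. Get (opcode 4) must be answered with
    // AccessAckData, while AcquireBlock (opcode 6) may legally see either Grant or, via
    // responseMapSecondOption, GrantData.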
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, "'C' and 'D' concurrent, despite minLatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
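// Illustrative sketch, not part of the original file: the usual DecoupledHelper
// pattern, where each leg fires only when every *other* condition holds. The
// signal names (in, out, tokenAvail) are hypothetical.
//   val helper = DecoupledHelper(in.valid, out.ready, tokenAvail)
//   in.ready  := helper.fire(in.valid)   // all conditions except in.valid
//   out.valid := helper.fire(out.ready)  // all conditions except out.ready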
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
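// For example, Majority(Seq(a, b, c)) elaborates to (a && b) || (a && c) || (b && c):
// n = (3 >> 1) + 1 = 2, so every 2-element subset is ANDed and the results are ORed.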
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
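// Illustrative sketch, not part of the original file: a minimal module exercising
// MaskGen with beatBytes = 4, matching the worked example in the comment above.
// The module and port names are hypothetical.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(8.W))   // only the low log2(beatBytes) bits are used
    val lgSize = Input(UInt(2.W))   // log2 of the access size in bytes
    val mask   = Output(UInt(4.W))  // one bit per byte lane of the beat
  })
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 4)
}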
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
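// Illustrative sketch, not part of the original file: a minimal module consuming
// a PlusArg at simulation time. The module name and the "example_max_cycles"
// plusarg are hypothetical; +example_max_cycles=N on the simulator command line
// overrides the default of 0 (disabled).
class PlusArgExample extends Module {
  val maxCycles = PlusArg("example_max_cycles", default = 0, docstring = "Stop after N cycles; 0 disables")
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  assert(maxCycles === 0.U || cycles < maxCycles, "example cycle limit exceeded")
}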
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
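// Illustrative sketch, not part of the original file: addWrap/subWrap assume both
// operands are already < n, e.g. advancing a read pointer into a "depth"-entry
// queue (rdPtr and depth are hypothetical names):
//   rdPtr := rdPtr.addWrap(1.U, depth)   // wraps back to 0 after depth - 1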
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
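// For example (not part of the original file), groupByIntoSeq keeps keys in
// first-encounter order, unlike Seq.groupBy:
//   groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) == Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4))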
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
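// Tracks position within a multi-beat message: 'first' is high on the first beat,
// 'last' on the final beat, 'done' when the final beat fires, and 'count' is the
// index of the current beat (0 on the first beat).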
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_42( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [5:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [5:0] source_1; // @[Monitor.scala:541:22]
reg denied; // @[Monitor.scala:543:22]
reg [57:0] inflight; // @[Monitor.scala:614:27]
reg [231:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [231:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [63:0] _GEN_0 = {58'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [63:0] _GEN_3 = {58'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [57:0] inflight_1; // @[Monitor.scala:726:35]
reg [231:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File BankBinder.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
case class BankBinderNode(mask: BigInt)(implicit valName: ValName) extends TLCustomNode
{
private val bit = mask & -mask
val maxXfer = TransferSizes(1, if (bit == 0 || bit > 4096) 4096 else bit.toInt)
val ids = AddressSet.enumerateMask(mask)
def resolveStar(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
val ports = ids.size
val oStar = if (oStars == 0) 0 else (ports - oKnown) / oStars
val iStar = if (iStars == 0) 0 else (ports - iKnown) / iStars
require (ports == iKnown + iStar*iStars, s"${name} must have ${ports} inputs, but has ${iKnown} + ${iStar}*${iStars} (at ${lazyModule.line})")
require (ports == oKnown + oStar*oStars, s"${name} must have ${ports} outputs, but has ${oKnown} + ${oStar}*${oStars} (at ${lazyModule.line})")
(iStar, oStar)
}
def mapParamsD(n: Int, p: Seq[TLMasterPortParameters]): Seq[TLMasterPortParameters] =
(p zip ids) map { case (cp, id) => cp.v1copy(clients = cp.clients.map { c => c.v1copy(
visibility = c.visibility.flatMap { a => a.intersect(AddressSet(id, ~mask))},
supportsProbe = c.supports.probe intersect maxXfer,
supportsArithmetic = c.supports.arithmetic intersect maxXfer,
supportsLogical = c.supports.logical intersect maxXfer,
supportsGet = c.supports.get intersect maxXfer,
supportsPutFull = c.supports.putFull intersect maxXfer,
supportsPutPartial = c.supports.putPartial intersect maxXfer,
supportsHint = c.supports.hint intersect maxXfer)})}
def mapParamsU(n: Int, p: Seq[TLSlavePortParameters]): Seq[TLSlavePortParameters] =
(p zip ids) map { case (mp, id) => mp.v1copy(managers = mp.managers.flatMap { m =>
val addresses = m.address.flatMap(a => a.intersect(AddressSet(id, ~mask)))
if (addresses.nonEmpty)
Some(m.v1copy(
address = addresses,
supportsAcquireT = m.supportsAcquireT intersect maxXfer,
supportsAcquireB = m.supportsAcquireB intersect maxXfer,
supportsArithmetic = m.supportsArithmetic intersect maxXfer,
supportsLogical = m.supportsLogical intersect maxXfer,
supportsGet = m.supportsGet intersect maxXfer,
supportsPutFull = m.supportsPutFull intersect maxXfer,
supportsPutPartial = m.supportsPutPartial intersect maxXfer,
supportsHint = m.supportsHint intersect maxXfer))
else None
})}
}
/* A BankBinder is used to divide contiguous memory regions into banks, suitable for a cache */
class BankBinder(mask: BigInt)(implicit p: Parameters) extends LazyModule
{
val node = BankBinderNode(mask)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out <> in
}
}
}
object BankBinder
{
def apply(mask: BigInt)(implicit p: Parameters): TLNode = {
val binder = LazyModule(new BankBinder(mask))
binder.node
}
def apply(nBanks: Int, granularity: Int)(implicit p: Parameters): TLNode = {
if (nBanks > 0) apply(granularity * (nBanks-1))
else TLTempNode()
}
}
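A hedged usage sketch (the crossbar names below are placeholders, not defined in BankBinder.scala): with nBanks = 4 and granularity = 4096 the mask is 4096 * (4 - 1) = 0x3000, AddressSet.enumerateMask(0x3000) yields the bank ids 0x0000, 0x1000, 0x2000 and 0x3000, and the node therefore resolves to four parallel edges.
// Sketch only: `cacheXbar` and `memXbar` are assumed TL nodes elsewhere in the graph.
val banks = BankBinder(4, 4096)
memXbar :=* banks      // the binder supplies one output edge per bank id
banks   :*= cacheXbar  // and demands one input edge per bank id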
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
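As a hedged sketch of how these node types are typically declared (the names and parameter values below are illustrative, not taken from Nodes.scala): a simple master port is described with TLMasterPortParameters and can then be bound into the graph with the diplomacy operators.
// Sketch only; assumes the usual freechips.rocketchip.diplomacy/tilelink imports.
val clientNode = TLClientNode(Seq(TLMasterPortParameters.v1(
  clients = Seq(TLMasterParameters.v1(
    name     = "example-master",
    sourceId = IdRange(0, 1))))))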
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
* its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
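A hedged sketch of the childClock/childReset hooks above (the module and port names are invented for illustration): a LazyRawModuleImp that receives its clock and reset through explicit IO and forwards them to any lazily instantiated children.
// Sketch only, not taken from the files above.
class IslandExample(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren = true
    val clk = IO(Input(Clock()))
    val rst = IO(Input(AsyncReset()))
    // Children created with LazyModule(...) inside this wrapper would be
    // clocked by childClock/childReset rather than an implicit clock.
    childClock := clk
    childReset := rst
  }
}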
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a set of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a set of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a set of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a set of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a set of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a set of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up the correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: unconnected forwarded diplomatic signals are tied to DontCare for compatibility;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: unconnected forwarded diplomatic signals are tied to DontCare for compatibility;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
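A hedged sketch of the `in`/`out` accessors documented above (the adapter below is illustrative, not one of the files in this prompt): inside a LazyModuleImp, each element of node.in and node.out is a (bundle, edge) pair produced by the negotiation, which is exactly the pattern the BankBinder implementation uses.
// Sketch only: a pass-through TileLink adapter.
class PassThrough(implicit p: Parameters) extends LazyModule {
  val node = TLAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      out <> in  // edgeIn/edgeOut carry the negotiated parameters if needed
    }
  }
}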
| module BankBinder( // @[BankBinder.scala:61:9]
input clock, // @[BankBinder.scala:61:9]
input reset, // @[BankBinder.scala:61:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[BankBinder.scala:61:9]
wire [3:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[BankBinder.scala:61:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[BankBinder.scala:61:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[BankBinder.scala:61:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[BankBinder.scala:61:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[BankBinder.scala:61:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[BankBinder.scala:61:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[BankBinder.scala:61:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[BankBinder.scala:61:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[BankBinder.scala:61:9]
wire [3:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[BankBinder.scala:61:9]
wire auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[BankBinder.scala:61:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[BankBinder.scala:61:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[BankBinder.scala:61:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[BankBinder.scala:61:9]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[BankBinder.scala:61:9]
wire [3:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[BankBinder.scala:61:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[BankBinder.scala:61:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[BankBinder.scala:61:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[BankBinder.scala:61:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[BankBinder.scala:61:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[BankBinder.scala:61:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[BankBinder.scala:61:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[BankBinder.scala:61:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[BankBinder.scala:61:9]
wire [3:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[BankBinder.scala:61:9]
wire nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[BankBinder.scala:61:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[BankBinder.scala:61:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[BankBinder.scala:61:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[BankBinder.scala:61:9]
wire auto_in_a_ready_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[BankBinder.scala:61:9]
wire [1:0] auto_in_d_bits_param_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_d_bits_size_0; // @[BankBinder.scala:61:9]
wire [3:0] auto_in_d_bits_source_0; // @[BankBinder.scala:61:9]
wire auto_in_d_bits_sink_0; // @[BankBinder.scala:61:9]
wire auto_in_d_bits_denied_0; // @[BankBinder.scala:61:9]
wire [63:0] auto_in_d_bits_data_0; // @[BankBinder.scala:61:9]
wire auto_in_d_bits_corrupt_0; // @[BankBinder.scala:61:9]
wire auto_in_d_valid_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_a_bits_param_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_a_bits_size_0; // @[BankBinder.scala:61:9]
wire [3:0] auto_out_a_bits_source_0; // @[BankBinder.scala:61:9]
wire [31:0] auto_out_a_bits_address_0; // @[BankBinder.scala:61:9]
wire [7:0] auto_out_a_bits_mask_0; // @[BankBinder.scala:61:9]
wire [63:0] auto_out_a_bits_data_0; // @[BankBinder.scala:61:9]
wire auto_out_a_bits_corrupt_0; // @[BankBinder.scala:61:9]
wire auto_out_a_valid_0; // @[BankBinder.scala:61:9]
wire auto_out_d_ready_0; // @[BankBinder.scala:61:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[BankBinder.scala:61:9]
assign nodeOut_a_valid = nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_opcode = nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_param = nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_size = nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_source = nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_address = nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_mask = nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_data = nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_corrupt = nodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_d_ready = nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[BankBinder.scala:61:9]
assign nodeIn_a_ready = nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[BankBinder.scala:61:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[BankBinder.scala:61:9]
assign nodeIn_d_valid = nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_opcode = nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_param = nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_size = nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_source = nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_sink = nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_denied = nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_data = nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_corrupt = nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
TLMonitor_55 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
assign auto_in_a_ready = auto_in_a_ready_0; // @[BankBinder.scala:61:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[BankBinder.scala:61:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[BankBinder.scala:61:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[BankBinder.scala:61:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
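// Editor's note: a minimal usage sketch, not part of the original file. It assumes only
// the ShiftRegInit object defined above; the module and register names ("in_pipe") are
// illustrative. The output lags the input by two cycles, and both stages reset to false.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in = Input(Bool())
    val out = Output(Bool())
  })
  // Two named, reset-to-false pipeline stages between in and out.
  io.out := ShiftRegInit(io.in, n = 2, init = false.B, name = Some("in_pipe"))
}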
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
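// Editor's note: a hedged sketch of the timing-pipe use case the comment block above
// describes; not part of the original file, and the names and widths are illustrative.
// Each of the three stages is an asynchronously reset register vector initialized to zero.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val q = Output(UInt(4.W))
  })
  io.q := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("timing_pipe"))
}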
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
 * intended to be replaced by the integrator's metastable flop chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
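// Editor's note: a minimal sketch, not part of the original file, of the typical CDC use
// of AsyncResetSynchronizerShiftReg defined above: a single asynchronous level is passed
// through a 3-deep synchronizer before being consumed in this clock domain. Module and
// signal names are illustrative; with a 1-bit input this elaborates a w1_d3_i0 module
// like the one that follows.
class AsyncLevelSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("level_sync"))
}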
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_1( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_5 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
 * intended to be replaced by the integrator's metastable flop chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
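// Editor's note: an illustrative sketch, not part of the original file, of the reset-less
// SynchronizerShiftReg above, e.g. for synchronizing a level into a domain whose registers
// have no reset. Names are placeholders.
class NonResetSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := SynchronizerShiftReg(io.async_in, sync = 3, name = Some("noreset_sync"))
}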
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
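// Editor's note: a hedged sketch, not part of the original file, of ClockCrossingReg:
// a single enable-gated capture register for a multi-bit value that crosses clock domains
// under an external handshake, rather than a multi-stage synchronizer. Names and widths
// are illustrative.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val en = Input(Bool())
    val q = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = true, name = Some("cdc_capture"))
}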
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_166( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
output io_q // @[ShiftReg.scala:36:14]
);
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_298 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
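// Editor's note (illustrative widths): instantiated with inputType = SInt(8.W) and
// cType = dType = SInt(20.W), the MacUnit above computes out_d = in_c + in_a * in_b,
// i.e. a single multiply-accumulate, via the Arithmetic typeclass's mac.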
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
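// Editor's note: a minimal sketch of instantiating the PE above; not part of the original
// file. The types, widths, and control settings are illustrative, and the implicit
// Arithmetic[SInt] comes from SIntArithmetic in Arithmetic.scala below.
class PEExample extends Module {
  val io = IO(new Bundle {
    val a = Input(SInt(8.W))
    val b = Input(SInt(20.W))
    val d = Input(SInt(20.W))
    val out_b = Output(SInt(20.W))
  })
  val pe = Module(new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.WS, max_simultaneous_matmuls = 8))
  pe.io.in_a := io.a
  pe.io.in_b := io.b
  pe.io.in_d := io.d
  pe.io.in_control.dataflow := 1.U // ignored here: the dataflow is fixed to WS at construction
  pe.io.in_control.propagate := 0.U // COMPUTE: multiply by the weight held in c1
  pe.io.in_control.shift := 0.U
  pe.io.in_id := 0.U
  pe.io.in_last := false.B
  pe.io.in_valid := true.B
  io.out_b := pe.io.out_b
}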
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
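// Editor's worked example (illustrative numbers): for self = 11 (0b1011) and u = 2,
// point_five = self(1) = 1, zeros = ((11 & 0b1) != 0) = 1, ones_digit = self(2) = 0,
// so r = 1 and the result is (11 >> 2) + 1 = 3, i.e. 11/4 = 2.75 rounded to the nearest integer.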
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
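// Editor's worked example (illustrative parameters): with expWidth = 8, sigWidth = 24
// (so bias = 127) and u = 3, shift_exp = 124 and shift_fn encodes 2^(124-127) = 0.125,
// so the multiply above scales self by 1/8.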
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_261( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24]
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [31:0] c1; // @[PE.scala:70:15]
wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [31:0] c2; // @[PE.scala:71:15]
wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25]
wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61]
wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38]
wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38]
assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16]
assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10]
wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10]
c1 <= _GEN_7; // @[PE.scala:70:15, :124:10]
if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30]
end
else // @[PE.scala:71:15, :118:101, :119:30]
c2 <= _GEN_7; // @[PE.scala:71:15, :124:10]
end
else begin // @[PE.scala:31:7]
c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10]
c2 <= io_in_control_propagate_0 ? _GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10]
end
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end
  end // @[PE.scala:31:7]
MacUnit_5 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24]
.io_out_d (_mac_unit_io_out_d)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
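  // Note (added for clarity): `visible` holds when, for every client on this edge, either the
  // transaction's source ID is not owned by that client or the address lies inside one of that
  // client's declared visibility ranges; the per-channel format checks below assert it.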
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
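    // Note (added for clarity): this is the usual "symbolic constant" trick for formal
    // verification -- sym_source is constrained to be an arbitrary but stable legal source ID,
    // so the request/response pairing checks below generalize to every source ID at once.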
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID, for which a response message " +
      "is already pending (not received until current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
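    // Worked example (added for clarity): size_to_numfullbits(3.U) = (1 << 3) - 1 = 7 = 0b111.
    // Each source ID gets a (value << 1 | 1) slot in the packed inflight_opcodes /
    // inflight_sizes registers below, and this all-ones mask is what later isolates a single
    // source's slot when looking it up or clearing it.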
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
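    // Note (added for clarity): these vectors are indexed by the 'A' opcode (PutFullData = 0
    // ... AcquirePerm = 7); e.g. a Get (opcode 4) must be answered by AccessAckData, while an
    // AcquireBlock (opcode 6) may legally see either Grant or GrantData.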
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
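    // Usage note (added for clarity): the timeout is set from a runtime plusarg, e.g. passing
    // +tilelink_timeout=100000 to the simulator makes this assertion fire if a request stays
    // inflight for that many cycles with no 'A' or 'D' beat; the default of 0 disables it.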
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
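/** Illustrative usage sketch (added for clarity; not part of the original file).
  * DecoupledHelper gathers every ready/valid term of a multi-way handshake so each side can
  * compute "fire" while excluding its own term. Here a single-beat pass-through is gated on
  * an extra `enable` input; all names in this example are hypothetical.
  */
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
    val enable = Input(Bool())
  })
  private val helper = DecoupledHelper(io.in.valid, io.out.ready, io.enable)
  io.in.ready := helper.fire(io.in.valid) // all terms except in.valid
  io.out.valid := helper.fire(io.out.ready) // all terms except out.ready
  io.out.bits := io.in.bits
}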
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
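// Usage sketch (illustrative; `sel` and the mapped values are hypothetical). MuxTLookup is a
// tuple-valued MuxLookup: one key comparison chain selects a whole tuple of signals at once:
//
//   val (opcode, hasData) = MuxTLookup(sel, (0.U(3.W), false.B), Seq(
//     1.U -> (4.U(3.W), false.B),
//     2.U -> (0.U(3.W), true.B)))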
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
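// Usage sketch (illustrative). Majority is true when strictly more than half of the inputs are set:
//
//   Majority("b1101".U)  // true:  three of four bits set
//   Majority("b0101".U)  // false: only two of four bits set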
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
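// Usage sketch (illustrative; `mask` is a hypothetical UInt). PopCountAtLeast builds the cheapest
// circuit that answers "are at least n bits set?" without a full popcount for small n:
//
//   val anySet = PopCountAtLeast(mask, 1)  // equivalent to mask.orR
//   val multi  = PopCountAtLeast(mask, 2)  // true when two or more bits are set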
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg (with the MSB as the highest byte lane): (0x0, 0, 4) => 0001, (0x3, 0, 4) => 1000, (0x0, 2, 4) => 1111
// groupBy ORs together adjacent byte lanes; with groupBy=2, the 4-bit mask 0010 reduces to 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
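// Usage sketch (illustrative; `addr` and `lgSize` are hypothetical request fields). MaskGen turns
// the low address bits and the log2 transfer size into a per-byte-lane write strobe for one beat:
//
//   val beatBytes = 4
//   val wmask = MaskGen(addr(log2Ceil(beatBytes)-1, 0), lgSize, beatBytes)
//   // e.g. addr(1,0)=0, lgSize=1 -> "b0011"; addr(1,0)=3, lgSize=0 -> "b1000"; lgSize=2 -> "b1111"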
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
 * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
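// Usage sketch (illustrative; the plusarg names are hypothetical). PlusArg reads a runtime
// +name=value simulation argument, and PlusArg.timeout kills the simulation when a counter
// exceeds the supplied value (a default of 0 disables the check):
//
//   val verbose = PlusArg("verbose", default = 0, docstring = "Enable verbose prints")
//   PlusArg.timeout("max_core_cycles", docstring = "Kill the sim after this many cycles")(cycleCount)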
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
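// Worked example (illustrative): leftOR propagates every set bit toward the MSB, rightOR toward
// the LSB, which is handy for turning a one-hot value into a thermometer code:
//
//   leftOR("b00100".U(5.W))   // "b11100"
//   rightOR("b00100".U(5.W))  // "b00111"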
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
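// Usage sketch (illustrative): unlike Seq.groupBy, the result order is deterministic
// (keys appear in first-encountered order), which keeps generated hardware stable across runs:
//
//   groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2)
//   // => Seq(1 -> Seq(1, 3, 5), 0 -> Seq(2, 4))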
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
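// Usage sketch (illustrative; `a` is a hypothetical TLBundleA). The opcode encodings above let a
// slave look up the D-channel response expected for each A-channel request:
//
//   val isRead  = a.opcode === TLMessages.Get
//   val dOpcode = TLMessages.adResponse(a.opcode)  // e.g. Get -> AccessAckData, PutFullData -> AccessAck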
/**
 * The three primary TileLink permissions are:
 *   (T)runk: the agent is (or is on the inwards path to) the global point of serialization.
 *   (B)ranch: the agent is on an outwards path from the Trunk and holds a read-only copy.
 *   (N)one: the agent holds no permissions on (no copy of) the block.
 * These permissions are permuted by transfer operations in various ways.
 * Operations can cap permissions, request that they be grown or shrunk,
 * or report on their current status.
 */
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
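// Usage sketch (illustrative; `sourceId` is a hypothetical UInt). IdRange is the half-open source-ID
// window a client owns; contains() works both at elaboration time and on hardware values:
//
//   val r = IdRange(4, 8)          // IDs 4, 5, 6, 7
//   r.contains(6)                  // true (Scala Boolean)
//   r.contains(sourceId)           // Bool, compares a UInt against the range
//   r.overlaps(IdRange(7, 12))     // true, both cover ID 7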
// A potentially empty inclusive range of power-of-2 transfer sizes [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
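// Usage sketch (illustrative; `a` is a hypothetical TLBundleA). TransferSizes describes the
// power-of-two burst sizes an agent supports, in bytes:
//
//   val xfer = TransferSizes(4, 64)
//   xfer.contains(16)                    // true
//   xfer.containsLg(a.size)              // Bool check of a TileLink lgSize field
//   xfer.intersect(TransferSizes(1, 8))  // TransferSizes(4, 8)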
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
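// Worked examples (illustrative). An AddressSet matches any address whose bits outside `mask`
// equal `base`, so a non-contiguous mask describes a strided region:
//
//   AddressSet(0x1000, 0xfff).contains(0x1abc)   // true:  0x1000-0x1fff
//   AddressSet(0x1000, 0xfff).contains(0x2000)   // false
//   AddressSet(0x1000, 0xf0f).contains(0x1104)   // true:  0x1100-0x110f is one fragment
//   AddressSet.misaligned(0x3000, 0x1800)        // Seq(AddressSet(0x3000, 0xfff), AddressSet(0x4000, 0x7ff))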
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
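// Usage sketch (illustrative; `upstream` is a hypothetical DecoupledIO). BufferParams is applied
// like a function to insert (or skip) a Queue of the configured depth:
//
//   val buffered = BufferParams.default(upstream)  // 2-entry Queue
//   val bypassed = BufferParams.none(upstream)     // passes the interface through unchanged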
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
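// Usage sketch (illustrative; `edge` is this TLEdge and `tl` a hypothetical TLBundle). The
// first/last/done helpers track the multibeat position so per-message state is updated only once:
//
//   val (a_first, a_last, a_done) = edge.firstlast(tl.a)
//   when (tl.a.fire && a_first) { /* capture request metadata on the first beat */ }
//   when (a_done)               { /* the entire (possibly multibeat) message has been accepted */ }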
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
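// Hedged usage sketch (comments only; `tl`, `addr`, and `wantRead` are assumed
// names): issuing an uncached read with the access constructors of TLEdgeOut,
// given `edge: TLEdgeOut` and a bundle `tl: TLBundle` from a client node.
//   val (getLegal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   tl.a.valid := wantRead && getLegal   // gate valid on the fast manager-support check
//   tl.a.bits  := get
//   tl.d.ready := true.B                 // sink the AccessAck/AccessAckData beats on channel D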
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
  // Ragged-aware transpose: unlike Seq.transpose, rows are simply dropped as they
  // run out instead of requiring equal lengths.
  private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
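  // Hedged usage sketch (comments only; `tl`, `doProbe`, `probeAddr`, `ownerId`,
  // `acquirerId`, `sinkId`, and `lgBlockBytes` are assumed names): a manager
  // recalling a block and then granting it to a new owner, given `edge: TLEdgeIn`
  // and a bundle `tl: TLBundle` from a manager node.
  //   val (prbLegal, prb) = edge.Probe(probeAddr, ownerId, lgBlockBytes, TLPermissions.toN)
  //   tl.b.valid := doProbe && prbLegal
  //   tl.b.bits  := prb
  //   // ...once the ProbeAck arrives on channel C, answer the waiting Acquire:
  //   tl.d.bits  := edge.Grant(sinkId, acquirerId, lgBlockBytes, TLPermissions.toT)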
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
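
For reference, below is a minimal sketch of how the manager-side constructors of TLEdgeIn are typically used. It is illustrative only: it is not one of the input files and is not reflected in the generated Verilog that follows; the module name, address range, beat width, and constant read value are assumptions, and import paths can differ between rocket-chip versions.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.tilelink._

// A read-only manager that answers every single-beat Get with a constant AccessAckData.
class ConstReadManager(implicit p: Parameters) extends LazyModule {
  val node = TLManagerNode(Seq(TLSlavePortParameters.v1(
    managers = Seq(TLSlaveParameters.v1(
      address     = Seq(AddressSet(0x4000, 0xfff)),
      supportsGet = TransferSizes(1, 8))),
    beatBytes = 8)))
  lazy val module = new LazyModuleImp(this) {
    val (in, edge) = node.in.head
    // Single-beat, purely combinational response path (the same pattern TLError uses).
    in.a.ready := in.d.ready
    in.d.valid := in.a.valid
    in.d.bits  := edge.AccessAck(in.a.bits, "hDEADBEEF".U(64.W)) // AccessAckData for a Get
    // Tie off the TL-C channels of the full bundle; this slave never probes.
    in.b.valid := false.B
    in.c.ready := true.B
    in.e.ready := true.B
  }
}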
| module TLMonitor_32( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_source = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_source = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [66:0] _c_opcodes_set_T_1 = 67'h0; // @[Monitor.scala:767:54]
wire [66:0] _c_sizes_set_T_1 = 67'h0; // @[Monitor.scala:768:52]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] c_set = 6'h0; // @[Monitor.scala:738:34]
wire [5:0] c_set_wo_ready = 6'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_opcodes_set_T = 6'h0; // @[Monitor.scala:767:79]
wire [5:0] _c_sizes_set_T = 6'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [7:0] _c_set_wo_ready_T = 8'h1; // @[OneHot.scala:58:35]
wire [7:0] _c_set_T = 8'h1; // @[OneHot.scala:58:35]
wire [23:0] c_opcodes_set = 24'h0; // @[Monitor.scala:740:34]
wire [23:0] c_sizes_set = 24'h0; // @[Monitor.scala:741:34]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [2:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [2:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits[2:1] != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [27:0] _is_aligned_T = {22'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [2:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [2:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1[2:1] != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_672 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_672; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_672; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [2:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
wire _T_745 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_745; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [2:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [5:0] inflight; // @[Monitor.scala:614:27]
reg [23:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [23:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] a_set; // @[Monitor.scala:626:34]
wire [5:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [23:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [23:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [5:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [5:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [5:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [5:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [5:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [5:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [5:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [5:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [5:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [23:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [23:0] _a_opcode_lookup_T_6 = {20'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [23:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[23:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [23:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [23:0] _a_size_lookup_T_6 = {20'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [23:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[23:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [7:0] _GEN_2 = 8'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [7:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [7:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[5:0] : 6'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_672 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[5:0] : 6'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [5:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [5:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [5:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [66:0] _a_opcodes_set_T_1 = {63'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[23:0] : 24'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [66:0] _a_sizes_set_T_1 = {63'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[23:0] : 24'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [5:0] d_clr; // @[Monitor.scala:664:34]
wire [5:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [23:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [23:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [7:0] _GEN_5 = 8'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [7:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [7:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [7:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [7:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[5:0] : 6'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_745 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[5:0] : 6'h0; // @[OneHot.scala:58:35]
wire [78:0] _d_opcodes_clr_T_5 = 79'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[23:0] : 24'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [78:0] _d_sizes_clr_T_5 = 79'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[23:0] : 24'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [5:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [5:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [5:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [23:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [23:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [23:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [23:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [23:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [23:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [5:0] inflight_1; // @[Monitor.scala:726:35]
wire [5:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [23:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [23:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [23:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [23:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [23:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [23:0] _c_opcode_lookup_T_6 = {20'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [23:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[23:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [23:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [23:0] _c_size_lookup_T_6 = {20'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [23:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[23:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [5:0] d_clr_1; // @[Monitor.scala:774:34]
wire [5:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [23:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [23:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_716 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_716 & d_release_ack_1 ? _d_clr_wo_ready_T_1[5:0] : 6'h0; // @[OneHot.scala:58:35]
wire _T_698 = _T_745 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_698 ? _d_clr_T_1[5:0] : 6'h0; // @[OneHot.scala:58:35]
wire [78:0] _d_opcodes_clr_T_11 = 79'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_698 ? _d_opcodes_clr_T_11[23:0] : 24'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [78:0] _d_sizes_clr_T_11 = 79'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_698 ? _d_sizes_clr_T_11[23:0] : 24'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 3'h0; // @[Monitor.scala:36:7, :795:113]
wire [5:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [5:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [23:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [23:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [23:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [23:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
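  // Illustrative sketch (values assumed, not exercised elsewhere in this file): with the
  // SeqBoolBitwiseOps implicits above in scope,
  //   val a = Seq(true.B, false.B, true.B)
  //   val b = Seq(true.B, true.B,  false.B)
  //   val and = a & b        // Seq(true.B && true.B, false.B && true.B, true.B && false.B)
  //   val any = (a | b).orR  // true.B, since at least one element is set
  // Note that `|` and `^` zero-pad the shorter operand via padZip, while `&` zips down to the
  // shorter length.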
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
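  // Illustrative sketch (inputs assumed) of the string helpers above:
  //   "PeripheryBusKey".underscore  // "periphery_bus_key"
  //   "Periphery Bus".kebab         // "periphery-bus"
  //   "tl".named(Some("master"))    // "tl_named_master"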
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
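  // Illustrative sketch of the signed-shift semantics above (operand widths assumed):
  //   val x = "b0110".U(4.W)
  //   x << 1.S(3.W)     // positive amount shifts left:  0b1100
  //   x << (-1).S(3.W)  // negative amount shifts right: 0b0011
  // The helpers read the sign from the MSB of `n` and undo the over-shift with a
  // compensating right shift of (1 << w).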
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
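  // Illustrative sketch (values assumed): for a pointer into an 8-entry queue,
  //   ptr.addWrap(3.U, 8)  // (ptr + 3) mod 8; power-of-2 n reduces to a simple truncation
  //   ptr.subWrap(1.U, 8)  // (ptr - 1) mod 8
  // For non-power-of-2 `n` the helpers fall back to the compare-and-correct Mux shown above.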
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
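  // Worked example (bit patterns assumed): for a 5-bit x = 0b00100,
  //   leftOR(x)  = 0b11100  // set bits propagate toward the MSB
  //   rightOR(x) = 0b00111  // set bits propagate toward the LSB
  // Both complete in about log2(width) OR stages via the recursive `helper`.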
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
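  // Illustrative sketch (inputs assumed):
  //   groupByIntoSeq(Seq("ab", "cd", "ae"))(_.head)
  //   // -> Seq('a' -> Seq("ab", "ae"), 'c' -> Seq("cd"))
  // Unlike Seq.groupBy, key order follows first appearance in the input, so generated
  // hardware names stay deterministic across runs.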
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_129( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
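  // Editor's note (illustrative worked example, not from the original source):
  // inflight_opcodes/inflight_sizes pack one (value << 1 | 1) slot per source ID,
  // so an all-zero slot means "nothing outstanding". With log_a_opcode_bus_size = 2
  // and source = 2, a Get (opcode 4) is stored as (4 << 1) | 1 = 9 at bit offset
  // 2 << 2 = 8; a_opcode_lookup masks out those 4 bits and shifts right by 1 to
  // recover opcode 4 when the matching D beat arrives.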
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
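// Editor's sketch (not part of the original file): a minimal, hypothetical use
// of DecoupledHelper. fire(exclude) ANDs every condition except the excluded
// one, which is the usual way to drive ready/valid without combinational loops.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in     = Flipped(Decoupled(UInt(8.W)))
    val out    = Decoupled(UInt(8.W))
    val enable = Input(Bool())
  })
  val helper = DecoupledHelper(io.in.valid, io.out.ready, io.enable)
  io.out.valid := helper.fire(io.out.ready) // in.valid && enable
  io.in.ready  := helper.fire(io.in.valid)  // out.ready && enable
  io.out.bits  := io.in.bits
}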
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
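// Editor's note (illustrative): Str packs ASCII characters into a UInt with the
// first character in the most significant byte, so Str("ab") elaborates to
// 0x6162.U(16.W); Str(x, 16) renders x as a fixed-width string of hex digit
// characters (leading zeros included).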
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
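// Editor's sketch (not part of the original file): a tiny wrapper showing how
// MaskGen is typically invoked, here for an 8-byte beat. The port widths are
// hypothetical; only the MaskGen call itself mirrors real usage.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(3.W))  // low-order address bits within the beat
    val lgSize = Input(UInt(2.W))  // log2 of the access size in bytes
    val mask   = Output(UInt(8.W)) // one bit per byte lane
  })
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 8)
}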
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
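// Editor's sketch (illustrative only, not part of the original file): reading a
// +verbose=N plusarg at simulation time and arming a +max_cycles watchdog. The
// plusarg names used here are hypothetical.
class PlusArgExample extends Module {
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  val verbose = PlusArg("verbose", default = 0, docstring = "enable debug printing")
  when (verbose =/= 0.U) { printf(p"cycle=$cycles\n") }
  PlusArg.timeout("max_cycles", default = 0, docstring = "kill sim after N cycles")(cycles)
}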
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
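  // Editor's sketch (not part of the original file): a throwaway module
  // exercising two of the UInt extension methods above; the widths are arbitrary.
  class UIntOpsExample extends Module {
    val io = IO(new Bundle {
      val x   = Input(UInt(8.W))
      val rot = Output(UInt(8.W))
      val sx  = Output(UInt(16.W))
    })
    io.rot := io.x.rotateLeft(3) // static rotate by 3 bit positions
    io.sx  := io.x.sextTo(16)    // sign-extend to 16 bits
  }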
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
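  // Editor's note (illustrative): unlike Seq.groupBy, the keys come out in
  // first-seen order, e.g. groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2)
  // yields Seq((1, List(3, 1, 1, 5)), (0, List(4))).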
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
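// Editor's sketch (illustrative, not part of the original file): adResponse maps
// an A-channel opcode to the D-channel opcode normally expected in reply (the
// monitor additionally accepts the GrantData alternative for AcquireBlock).
class ExpectedResponseExample extends Module {
  val io = IO(new Bundle {
    val aOpcode = Input(UInt(3.W))
    val dOpcode = Output(UInt(3.W))
  })
  io.dOpcode := TLMessages.adResponse(io.aOpcode)
}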
/**
* The three primary TileLink permissions are:
 * (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
 * (B)ranch: the agent holds a read-only copy, on an outwards path from the Trunk.
 * (N)one: the agent holds no copy of the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty (start == end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
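// Editor's note (illustrative): IdRange is half-open, so IdRange(0, 4) covers
// ids 0, 1, 2, 3; IdRange(0, 4).contains(3) is true, and
// IdRange(0, 4).overlaps(IdRange(4, 8)) is false because the ranges touch but
// do not intersect.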
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
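// Editor's note (illustrative): TransferSizes(4, 64) accepts any power-of-two
// size from 4 to 64 bytes: contains(16) is true, contains(3) is false (not a
// power of two), and intersect(TransferSizes(32, 128)) gives TransferSizes(32, 64).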
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
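// Editor's note (illustrative, using the example sets from the comment above):
//   AddressSet(0x200, 0xff).contains(BigInt(0x2ab))    // true: inside 0x200-0x2ff
//   AddressSet(0x1000, 0xf0f).contains(BigInt(0x1104)) // true: second 16-byte window
//   AddressSet(0x1000, 0xf0f).contains(BigInt(0x1010)) // false: bit 4 is outside the mask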
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
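// Editor's note (illustrative): BufferParams(2) (the default) inserts a depth-2
// Queue and reports latency = 1; BufferParams.flow can pass a beat through
// combinationally, so its latency is 0; BufferParams.none (depth 0) returns the
// channel unchanged.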
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
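// Illustrative sketch (not part of the original file): a TriStateValue either overrides or
// preserves an existing Boolean, which is how "only if explicitly set" configuration knobs
// are threaded through:
//   TriStateValue(true).update(orig = false)  // == true  (explicitly set, wins)
//   TriStateValue.unset.update(orig = false)  // == false (not set, keeps the original)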
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // 7-argument format string; if the subclass uses the same reference for both from and to, the 5-argument form below is used instead
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
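// Worked example (illustrative, not part of the original file): with beatBytes = 8 and a
// data-carrying message of size 2^5 = 32 bytes, numBeats1 yields beats1 = 3 (4 beats).
// As the channel fires, counter steps 0 -> 3 -> 2 -> 1, giving per-beat
//   (first, last, count) = (1,0,0), (0,0,1), (0,0,2), (0,1,3)
// so count reconstructs the beat index from beats1 & ~counter1 without a second counter.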
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
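// Usage sketch (illustrative, not part of the original file): a monitor can bound how many
// transactions a port keeps outstanding; `tl` stands for a TLBundle visible at this edge and
// maxOutstanding is a hypothetical limit:
//   val (inflight, nextInflight) = edge.inFlight(tl)
//   assert(inflight <= maxOutstanding.U, "too many TileLink transactions in flight")
// Returning both the registered count and its next value lets a check fire in the same
// cycle the limit would first be exceeded.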
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
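// Usage sketch (illustrative, not part of the original file): a client typically builds an
// A-channel beat from the edge and drives its port directly. Here `out` and `edge` stand for
// the (TLBundle, TLEdgeOut) pair from a client node, and `addr`/`wantRead` are placeholders:
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   out.a.valid := wantRead && legal
//   out.a.bits := get
// `legal` reflects whether a visible manager supports a Get of this size at this address;
// the require above only guarantees that some manager supports Gets at all.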
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_10( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27]
wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_10 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_14 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_16 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_20 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_22 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_28 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_30 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_34 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_36 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_40 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_42 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_48 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28]
wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] c_opcodes_set = 64'h0; // @[Monitor.scala:740:34]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_4_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_5_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [131:0] _c_sizes_set_T_1 = 132'h0; // @[Monitor.scala:768:52]
wire [6:0] _c_opcodes_set_T = 7'h0; // @[Monitor.scala:767:79]
wire [6:0] _c_sizes_set_T = 7'h0; // @[Monitor.scala:768:77]
wire [130:0] _c_opcodes_set_T_1 = 131'h0; // @[Monitor.scala:767:54]
wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59]
wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40]
wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [15:0] _c_set_wo_ready_T = 16'h1; // @[OneHot.scala:58:35]
wire [15:0] _c_set_T = 16'h1; // @[OneHot.scala:58:35]
wire [127:0] c_sizes_set = 128'h0; // @[Monitor.scala:741:34]
wire [15:0] c_set = 16'h0; // @[Monitor.scala:738:34]
wire [15:0] c_set_wo_ready = 16'h0; // @[Monitor.scala:739:34]
wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [3:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
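  // A-channel source-ID legality check: the source must fall in one of the four ID ranges selected by source[3:2]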
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] _source_ok_T = io_in_a_bits_source_0[3:2]; // @[Monitor.scala:36:7]
wire [1:0] _source_ok_T_6 = io_in_a_bits_source_0[3:2]; // @[Monitor.scala:36:7]
wire [1:0] _source_ok_T_12 = io_in_a_bits_source_0[3:2]; // @[Monitor.scala:36:7]
wire [1:0] _source_ok_T_18 = io_in_a_bits_source_0[3:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_1 = _source_ok_T == 2'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_3 = _source_ok_T_1; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_5 = _source_ok_T_3; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_7 = _source_ok_T_6 == 2'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_9 = _source_ok_T_7; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_11 = _source_ok_T_9; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_13 = _source_ok_T_12 == 2'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_15 = _source_ok_T_13; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_17 = _source_ok_T_15; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_17; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_19 = &_source_ok_T_18; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_21 = _source_ok_T_19; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_23 = _source_ok_T_21; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_23; // @[Parameters.scala:1138:31]
wire _source_ok_T_24 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_25 = _source_ok_T_24 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_25 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
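  // Address alignment check: the address bits below the transfer size must be zero (mask = ~(12'hFFF << size))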
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
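  // Reconstruct the full byte-lane mask implied by the address and size, for comparison against the A-channel mask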
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] _source_ok_T_26 = io_in_d_bits_source_0[3:2]; // @[Monitor.scala:36:7]
wire [1:0] _source_ok_T_32 = io_in_d_bits_source_0[3:2]; // @[Monitor.scala:36:7]
wire [1:0] _source_ok_T_38 = io_in_d_bits_source_0[3:2]; // @[Monitor.scala:36:7]
wire [1:0] _source_ok_T_44 = io_in_d_bits_source_0[3:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_27 = _source_ok_T_26 == 2'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_29 = _source_ok_T_27; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_31 = _source_ok_T_29; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_0 = _source_ok_T_31; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_33 = _source_ok_T_32 == 2'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_35 = _source_ok_T_33; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_37 = _source_ok_T_35; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_37; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_39 = _source_ok_T_38 == 2'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_41 = _source_ok_T_39; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_43 = _source_ok_T_41; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_43; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_45 = &_source_ok_T_44; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_49 = _source_ok_T_47; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_49; // @[Parameters.scala:1138:31]
wire _source_ok_T_50 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_51 = _source_ok_T_50 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_51 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
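  // A-channel fire and beat counter: a_first marks the first beat of each A-channel message (burst length decoded from size)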
wire _T_1336 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1336; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1336; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
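  // A-channel fields are latched on the first beat so later beats of a burst can be checked for consistency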
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _T_1409 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1409; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1409; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1409; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
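  // D-channel fields latched on the first beat for multi-beat consistency checks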
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
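  // Outstanding-transaction bookkeeping: one inflight bit per source ID, plus 4-bit opcode and 8-bit size shadows used to check that D responses match their A requests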
reg [15:0] inflight; // @[Monitor.scala:614:27]
reg [63:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [127:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
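  // a_set marks a source as inflight when the first beat of its A request is accepted; d_clr clears it when the matching D response (other than ReleaseAck) arrives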
wire [15:0] a_set; // @[Monitor.scala:626:34]
wire [15:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [63:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [127:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [6:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [6:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [6:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [6:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [6:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [63:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [63:0] _a_opcode_lookup_T_6 = {60'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [63:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[63:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [6:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [6:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65]
wire [6:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99]
wire [6:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67]
wire [6:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99]
wire [127:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [127:0] _a_size_lookup_T_6 = {120'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}]
wire [127:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[127:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [15:0] _GEN_3 = {12'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35]
wire [15:0] _GEN_4 = 16'h1 << _GEN_3; // @[OneHot.scala:58:35]
wire [15:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35]
wire [15:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_4; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T : 16'h0; // @[OneHot.scala:58:35]
wire _T_1262 = _T_1336 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1262 ? _a_set_T : 16'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1262 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1262 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [6:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [130:0] _a_opcodes_set_T_1 = {127'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1262 ? _a_opcodes_set_T_1[63:0] : 64'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [6:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [131:0] _a_sizes_set_T_1 = {127'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1262 ? _a_sizes_set_T_1[127:0] : 128'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [15:0] d_clr; // @[Monitor.scala:664:34]
wire [15:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [63:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [127:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_5 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_5; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_5; // @[Monitor.scala:673:46, :783:46]
wire _T_1308 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [15:0] _GEN_6 = {12'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35]
wire [15:0] _GEN_7 = 16'h1 << _GEN_6; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_7; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_7; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_7; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_7; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1308 & ~d_release_ack ? _d_clr_wo_ready_T : 16'h0; // @[OneHot.scala:58:35]
wire _T_1277 = _T_1409 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1277 ? _d_clr_T : 16'h0; // @[OneHot.scala:58:35]
wire [142:0] _d_opcodes_clr_T_5 = 143'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1277 ? _d_opcodes_clr_T_5[63:0] : 64'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [142:0] _d_sizes_clr_T_5 = 143'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1277 ? _d_sizes_clr_T_5[127:0] : 128'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [15:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [15:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [15:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [63:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [63:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [63:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [127:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [127:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [127:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
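  // Watchdog counter used to flag A requests that never receive a D response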
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
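  // Second tracker mirrors the above for the C -> D(ReleaseAck) path; the C-channel set terms are tied to zero in this module, so only the ReleaseAck clear logic is active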
reg [15:0] inflight_1; // @[Monitor.scala:726:35]
wire [15:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [63:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [63:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [127:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [127:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [63:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [63:0] _c_opcode_lookup_T_6 = {60'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [63:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[63:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [127:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [127:0] _c_size_lookup_T_6 = {120'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}]
wire [127:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[127:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [15:0] d_clr_1; // @[Monitor.scala:774:34]
wire [15:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [63:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [127:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1380 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1380 & d_release_ack_1 ? _d_clr_wo_ready_T_1 : 16'h0; // @[OneHot.scala:58:35]
wire _T_1362 = _T_1409 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1362 ? _d_clr_T_1 : 16'h0; // @[OneHot.scala:58:35]
wire [142:0] _d_opcodes_clr_T_11 = 143'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1362 ? _d_opcodes_clr_T_11[63:0] : 64'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [142:0] _d_sizes_clr_T_11 = 143'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1362 ? _d_sizes_clr_T_11[127:0] : 128'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 4'h0; // @[Monitor.scala:36:7, :795:113]
wire [15:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [15:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [63:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [63:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [127:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [127:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File NoC.scala:
package constellation.noc
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, BundleBridgeSink, InModuleBody}
import freechips.rocketchip.util.ElaborationArtefacts
import freechips.rocketchip.prci._
import constellation.router._
import constellation.channel._
import constellation.routing.{RoutingRelation, ChannelRoutingInfo}
import constellation.topology.{PhysicalTopology, UnidirectionalLine}
class NoCTerminalIO(
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle {
val ingress = MixedVec(ingressParams.map { u => Flipped(new IngressChannel(u)) })
val egress = MixedVec(egressParams.map { u => new EgressChannel(u) })
}
class NoC(nocParams: NoCParams)(implicit p: Parameters) extends LazyModule {
override def shouldBeInlined = nocParams.inlineNoC
val internalParams = InternalNoCParams(nocParams)
val allChannelParams = internalParams.channelParams
val allIngressParams = internalParams.ingressParams
val allEgressParams = internalParams.egressParams
val allRouterParams = internalParams.routerParams
val iP = p.alterPartial({ case InternalNoCKey => internalParams })
val nNodes = nocParams.topology.nNodes
val nocName = nocParams.nocName
val skipValidationChecks = nocParams.skipValidationChecks
val clockSourceNodes = Seq.tabulate(nNodes) { i => ClockSourceNode(Seq(ClockSourceParameters())) }
val router_sink_domains = Seq.tabulate(nNodes) { i =>
val router_sink_domain = LazyModule(new ClockSinkDomain(ClockSinkParameters(
name = Some(s"${nocName}_router_$i")
)))
router_sink_domain.clockNode := clockSourceNodes(i)
router_sink_domain
}
val routers = Seq.tabulate(nNodes) { i => router_sink_domains(i) {
val inParams = allChannelParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val outParams = allChannelParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val ingressParams = allIngressParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val egressParams = allEgressParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val noIn = inParams.size + ingressParams.size == 0
val noOut = outParams.size + egressParams.size == 0
if (noIn || noOut) {
println(s"Constellation WARNING: $nocName router $i seems to be unused, it will not be generated")
None
} else {
Some(LazyModule(new Router(
routerParams = allRouterParams(i),
preDiplomaticInParams = inParams,
preDiplomaticIngressParams = ingressParams,
outDests = outParams.map(_.destId),
egressIds = egressParams.map(_.egressId)
)(iP)))
}
}}.flatten
val ingressNodes = allIngressParams.map { u => IngressChannelSourceNode(u.destId) }
val egressNodes = allEgressParams.map { u => EgressChannelDestNode(u) }
// Generate channels between routers diplomatically
Seq.tabulate(nNodes, nNodes) { case (i, j) => if (i != j) {
val routerI = routers.find(_.nodeId == i)
val routerJ = routers.find(_.nodeId == j)
if (routerI.isDefined && routerJ.isDefined) {
val sourceNodes: Seq[ChannelSourceNode] = routerI.get.sourceNodes.filter(_.destId == j)
val destNodes: Seq[ChannelDestNode] = routerJ.get.destNodes.filter(_.destParams.srcId == i)
require (sourceNodes.size == destNodes.size)
(sourceNodes zip destNodes).foreach { case (src, dst) =>
val channelParam = allChannelParams.find(c => c.srcId == i && c.destId == j).get
router_sink_domains(j) {
implicit val p: Parameters = iP
(dst
:= ChannelWidthWidget(routerJ.get.payloadBits, routerI.get.payloadBits)
:= channelParam.channelGen(p)(src)
)
}
}
}
}}
// Generate terminal channels diplomatically
routers.foreach { dst => router_sink_domains(dst.nodeId) {
implicit val p: Parameters = iP
dst.ingressNodes.foreach(n => {
val ingressId = n.destParams.ingressId
require(dst.payloadBits <= allIngressParams(ingressId).payloadBits)
(n
:= IngressWidthWidget(dst.payloadBits, allIngressParams(ingressId).payloadBits)
:= ingressNodes(ingressId)
)
})
dst.egressNodes.foreach(n => {
val egressId = n.egressId
require(dst.payloadBits <= allEgressParams(egressId).payloadBits)
(egressNodes(egressId)
:= EgressWidthWidget(allEgressParams(egressId).payloadBits, dst.payloadBits)
:= n
)
})
}}
val debugNodes = routers.map { r =>
val sink = BundleBridgeSink[DebugBundle]()
sink := r.debugNode
sink
}
val ctrlNodes = if (nocParams.hasCtrl) {
(0 until nNodes).map { i =>
routers.find(_.nodeId == i).map { r =>
val sink = BundleBridgeSink[RouterCtrlBundle]()
sink := r.ctrlNode.get
sink
}
}
} else {
Nil
}
println(s"Constellation: $nocName Finished parameter validation")
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
println(s"Constellation: $nocName Starting NoC RTL generation")
val io = IO(new NoCTerminalIO(allIngressParams, allEgressParams)(iP) {
val router_clocks = Vec(nNodes, Input(new ClockBundle(ClockBundleParameters())))
val router_ctrl = if (nocParams.hasCtrl) Vec(nNodes, new RouterCtrlBundle) else Nil
})
(io.ingress zip ingressNodes.map(_.out(0)._1)).foreach { case (l,r) => r <> l }
(io.egress zip egressNodes .map(_.in (0)._1)).foreach { case (l,r) => l <> r }
(io.router_clocks zip clockSourceNodes.map(_.out(0)._1)).foreach { case (l,r) => l <> r }
if (nocParams.hasCtrl) {
ctrlNodes.zipWithIndex.map { case (c,i) =>
if (c.isDefined) {
io.router_ctrl(i) <> c.get.in(0)._1
} else {
io.router_ctrl(i) <> DontCare
}
}
}
// TODO: These assume a single clock-domain across the entire noc
val debug_va_stall_ctr = RegInit(0.U(64.W))
val debug_sa_stall_ctr = RegInit(0.U(64.W))
val debug_any_stall_ctr = debug_va_stall_ctr + debug_sa_stall_ctr
debug_va_stall_ctr := debug_va_stall_ctr + debugNodes.map(_.in(0)._1.va_stall.reduce(_+_)).reduce(_+_)
debug_sa_stall_ctr := debug_sa_stall_ctr + debugNodes.map(_.in(0)._1.sa_stall.reduce(_+_)).reduce(_+_)
dontTouch(debug_va_stall_ctr)
dontTouch(debug_sa_stall_ctr)
dontTouch(debug_any_stall_ctr)
def prepend(s: String) = Seq(nocName, s).mkString(".")
ElaborationArtefacts.add(prepend("noc.graphml"), graphML)
val adjList = routers.map { r =>
val outs = r.outParams.map(o => s"${o.destId}").mkString(" ")
val egresses = r.egressParams.map(e => s"e${e.egressId}").mkString(" ")
val ingresses = r.ingressParams.map(i => s"i${i.ingressId} ${r.nodeId}")
(Seq(s"${r.nodeId} $outs $egresses") ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.adjlist"), adjList)
val xys = routers.map(r => {
val n = r.nodeId
val ids = (Seq(r.nodeId.toString)
++ r.egressParams.map(e => s"e${e.egressId}")
++ r.ingressParams.map(i => s"i${i.ingressId}")
)
val plotter = nocParams.topology.plotter
val coords = (Seq(plotter.node(r.nodeId))
++ Seq.tabulate(r.egressParams.size ) { i => plotter. egress(i, r. egressParams.size, r.nodeId) }
++ Seq.tabulate(r.ingressParams.size) { i => plotter.ingress(i, r.ingressParams.size, r.nodeId) }
)
(ids zip coords).map { case (i, (x, y)) => s"$i $x $y" }.mkString("\n")
}).mkString("\n")
ElaborationArtefacts.add(prepend("noc.xy"), xys)
val edgeProps = routers.map { r =>
val outs = r.outParams.map { o =>
(Seq(s"${r.nodeId} ${o.destId}") ++ (if (o.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val egresses = r.egressParams.map { e =>
(Seq(s"${r.nodeId} e${e.egressId}") ++ (if (e.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val ingresses = r.ingressParams.map { i =>
(Seq(s"i${i.ingressId} ${r.nodeId}") ++ (if (i.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
(outs ++ egresses ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.edgeprops"), edgeProps)
println(s"Constellation: $nocName Finished NoC RTL generation")
}
}
| module TLNoC_2_router_14ClockSinkDomain( // @[ClockDomain.scala:14:9]
output [3:0] auto_routers_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_2_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_routers_source_nodes_out_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_routers_source_nodes_out_2_credit_return, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_routers_source_nodes_out_2_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_routers_source_nodes_out_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_routers_source_nodes_out_1_credit_return, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_routers_source_nodes_out_1_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_routers_source_nodes_out_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_routers_source_nodes_out_0_credit_return, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_routers_source_nodes_out_0_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_2_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_dest_nodes_in_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_routers_dest_nodes_in_2_credit_return, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_routers_dest_nodes_in_2_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_dest_nodes_in_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_routers_dest_nodes_in_1_credit_return, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_routers_dest_nodes_in_1_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_routers_dest_nodes_in_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_routers_dest_nodes_in_0_credit_return, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_routers_dest_nodes_in_0_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_reset // @[LazyModuleImp.scala:107:25]
);
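  // This wrapper contains no logic of its own: it forwards the domain clock and reset
  // (auto_clock_in_*) to the Router_61 instance below and connects every source/dest
  // channel port one-for-one to the router's corresponding auto_* port.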
Router_61 routers ( // @[NoC.scala:67:22]
.clock (auto_clock_in_clock),
.reset (auto_clock_in_reset),
.auto_debug_out_va_stall_0 (auto_routers_debug_out_va_stall_0),
.auto_debug_out_va_stall_1 (auto_routers_debug_out_va_stall_1),
.auto_debug_out_va_stall_2 (auto_routers_debug_out_va_stall_2),
.auto_debug_out_sa_stall_0 (auto_routers_debug_out_sa_stall_0),
.auto_debug_out_sa_stall_1 (auto_routers_debug_out_sa_stall_1),
.auto_debug_out_sa_stall_2 (auto_routers_debug_out_sa_stall_2),
.auto_source_nodes_out_2_flit_0_valid (auto_routers_source_nodes_out_2_flit_0_valid),
.auto_source_nodes_out_2_flit_0_bits_head (auto_routers_source_nodes_out_2_flit_0_bits_head),
.auto_source_nodes_out_2_flit_0_bits_tail (auto_routers_source_nodes_out_2_flit_0_bits_tail),
.auto_source_nodes_out_2_flit_0_bits_payload (auto_routers_source_nodes_out_2_flit_0_bits_payload),
.auto_source_nodes_out_2_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_2_flit_0_bits_flow_vnet_id),
.auto_source_nodes_out_2_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_2_flit_0_bits_flow_ingress_node),
.auto_source_nodes_out_2_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_2_flit_0_bits_flow_ingress_node_id),
.auto_source_nodes_out_2_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_2_flit_0_bits_flow_egress_node),
.auto_source_nodes_out_2_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_2_flit_0_bits_flow_egress_node_id),
.auto_source_nodes_out_2_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_2_flit_0_bits_virt_channel_id),
.auto_source_nodes_out_2_credit_return (auto_routers_source_nodes_out_2_credit_return),
.auto_source_nodes_out_2_vc_free (auto_routers_source_nodes_out_2_vc_free),
.auto_source_nodes_out_1_flit_0_valid (auto_routers_source_nodes_out_1_flit_0_valid),
.auto_source_nodes_out_1_flit_0_bits_head (auto_routers_source_nodes_out_1_flit_0_bits_head),
.auto_source_nodes_out_1_flit_0_bits_tail (auto_routers_source_nodes_out_1_flit_0_bits_tail),
.auto_source_nodes_out_1_flit_0_bits_payload (auto_routers_source_nodes_out_1_flit_0_bits_payload),
.auto_source_nodes_out_1_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_vnet_id),
.auto_source_nodes_out_1_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node),
.auto_source_nodes_out_1_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node_id),
.auto_source_nodes_out_1_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node),
.auto_source_nodes_out_1_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node_id),
.auto_source_nodes_out_1_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_1_flit_0_bits_virt_channel_id),
.auto_source_nodes_out_1_credit_return (auto_routers_source_nodes_out_1_credit_return),
.auto_source_nodes_out_1_vc_free (auto_routers_source_nodes_out_1_vc_free),
.auto_source_nodes_out_0_flit_0_valid (auto_routers_source_nodes_out_0_flit_0_valid),
.auto_source_nodes_out_0_flit_0_bits_head (auto_routers_source_nodes_out_0_flit_0_bits_head),
.auto_source_nodes_out_0_flit_0_bits_tail (auto_routers_source_nodes_out_0_flit_0_bits_tail),
.auto_source_nodes_out_0_flit_0_bits_payload (auto_routers_source_nodes_out_0_flit_0_bits_payload),
.auto_source_nodes_out_0_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_vnet_id),
.auto_source_nodes_out_0_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node),
.auto_source_nodes_out_0_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node_id),
.auto_source_nodes_out_0_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node),
.auto_source_nodes_out_0_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node_id),
.auto_source_nodes_out_0_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_0_flit_0_bits_virt_channel_id),
.auto_source_nodes_out_0_credit_return (auto_routers_source_nodes_out_0_credit_return),
.auto_source_nodes_out_0_vc_free (auto_routers_source_nodes_out_0_vc_free),
.auto_dest_nodes_in_2_flit_0_valid (auto_routers_dest_nodes_in_2_flit_0_valid),
.auto_dest_nodes_in_2_flit_0_bits_head (auto_routers_dest_nodes_in_2_flit_0_bits_head),
.auto_dest_nodes_in_2_flit_0_bits_tail (auto_routers_dest_nodes_in_2_flit_0_bits_tail),
.auto_dest_nodes_in_2_flit_0_bits_payload (auto_routers_dest_nodes_in_2_flit_0_bits_payload),
.auto_dest_nodes_in_2_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_2_flit_0_bits_flow_vnet_id),
.auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_2_flit_0_bits_flow_ingress_node),
.auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id),
.auto_dest_nodes_in_2_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_2_flit_0_bits_flow_egress_node),
.auto_dest_nodes_in_2_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_2_flit_0_bits_flow_egress_node_id),
.auto_dest_nodes_in_2_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_2_flit_0_bits_virt_channel_id),
.auto_dest_nodes_in_2_credit_return (auto_routers_dest_nodes_in_2_credit_return),
.auto_dest_nodes_in_2_vc_free (auto_routers_dest_nodes_in_2_vc_free),
.auto_dest_nodes_in_1_flit_0_valid (auto_routers_dest_nodes_in_1_flit_0_valid),
.auto_dest_nodes_in_1_flit_0_bits_head (auto_routers_dest_nodes_in_1_flit_0_bits_head),
.auto_dest_nodes_in_1_flit_0_bits_tail (auto_routers_dest_nodes_in_1_flit_0_bits_tail),
.auto_dest_nodes_in_1_flit_0_bits_payload (auto_routers_dest_nodes_in_1_flit_0_bits_payload),
.auto_dest_nodes_in_1_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_1_flit_0_bits_flow_vnet_id),
.auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node),
.auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id),
.auto_dest_nodes_in_1_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node),
.auto_dest_nodes_in_1_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node_id),
.auto_dest_nodes_in_1_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_1_flit_0_bits_virt_channel_id),
.auto_dest_nodes_in_1_credit_return (auto_routers_dest_nodes_in_1_credit_return),
.auto_dest_nodes_in_1_vc_free (auto_routers_dest_nodes_in_1_vc_free),
.auto_dest_nodes_in_0_flit_0_valid (auto_routers_dest_nodes_in_0_flit_0_valid),
.auto_dest_nodes_in_0_flit_0_bits_head (auto_routers_dest_nodes_in_0_flit_0_bits_head),
.auto_dest_nodes_in_0_flit_0_bits_tail (auto_routers_dest_nodes_in_0_flit_0_bits_tail),
.auto_dest_nodes_in_0_flit_0_bits_payload (auto_routers_dest_nodes_in_0_flit_0_bits_payload),
.auto_dest_nodes_in_0_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_vnet_id),
.auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node),
.auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id),
.auto_dest_nodes_in_0_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node),
.auto_dest_nodes_in_0_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node_id),
.auto_dest_nodes_in_0_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_0_flit_0_bits_virt_channel_id),
.auto_dest_nodes_in_0_credit_return (auto_routers_dest_nodes_in_0_credit_return),
.auto_dest_nodes_in_0_vc_free (auto_routers_dest_nodes_in_0_vc_free)
); // @[NoC.scala:67:22]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File TilelinkAdapters.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.soc.{CanAttachToGlobalNoC}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import scala.collection.immutable.{ListMap}
abstract class TLChannelToNoC[T <: TLChannel](gen: => T, edge: TLEdge, idToEgress: Int => Int)(implicit val p: Parameters) extends Module with TLFieldHelper {
val flitWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Flipped(Decoupled(gen))
val flit = Decoupled(new IngressFlit(flitWidth))
})
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
// convert decoupled to irrevocable
val q = Module(new Queue(gen, 1, pipe=true, flow=true))
val protocol = q.io.deq
val has_body = Wire(Bool())
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val head = edge.first(protocol.bits, protocol.fire)
val tail = edge.last(protocol.bits, protocol.fire)
def requestOH: Seq[Bool]
val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt))
val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt))
val is_body = RegInit(false.B)
io.flit.valid := protocol.valid
protocol.ready := io.flit.ready && (is_body || !has_body)
io.flit.bits.head := head && !is_body
io.flit.bits.tail := tail && (is_body || !has_body)
io.flit.bits.egress_id := Mux1H(requestOH.zipWithIndex.map { case (r, i) =>
r -> idToEgress(i).U
})
io.flit.bits.payload := Mux(is_body, body, const)
when (io.flit.fire && io.flit.bits.head) { is_body := true.B }
when (io.flit.fire && io.flit.bits.tail) { is_body := false.B }
}
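// Serialization summary for TLChannelToNoC: the constant (per-message) fields are packed
// into the head flit's payload, and the body fields (mask/data/corrupt) are sent on the
// remaining flits. `is_body` tracks the phase: it is set when a head flit is accepted and
// cleared when the tail flit is accepted, so a message with no body fields
// (has_body === false) goes out as a single flit with both head and tail asserted.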
abstract class TLChannelFromNoC[T <: TLChannel](gen: => T)(implicit val p: Parameters) extends Module with TLFieldHelper {
val flitWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Decoupled(gen)
val flit = Flipped(Decoupled(new EgressFlit(flitWidth)))
})
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
val protocol = Wire(Decoupled(gen))
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val is_const = RegInit(true.B)
val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W))
val const = Mux(io.flit.bits.head, io.flit.bits.payload, const_reg)
io.flit.ready := (is_const && !io.flit.bits.tail) || protocol.ready
protocol.valid := (!is_const || io.flit.bits.tail) && io.flit.valid
def assign(i: UInt, sigs: Seq[Data]) = {
var t = i
for (s <- sigs.reverse) {
s := t.asTypeOf(s.cloneType)
t = t >> s.getWidth
}
}
assign(const, const_fields)
assign(io.flit.bits.payload, body_fields)
when (io.flit.fire && io.flit.bits.head) { is_const := false.B; const_reg := io.flit.bits.payload }
when (io.flit.fire && io.flit.bits.tail) { is_const := true.B }
}
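// Deserialization summary for TLChannelFromNoC: the head flit's payload is latched into
// `const_reg` and unpacked into the constant fields, while each later flit's payload is
// unpacked into the body fields. A protocol beat is presented for every body flit (and for
// a head flit that is also a tail); the head flit of a multi-beat message is consumed
// silently, since it only carries the constant fields.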
trait HasAddressDecoder {
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
val edgeIn: TLEdge
val edgesOut: Seq[TLEdge]
lazy val reacheableIO = edgesOut.map { mp =>
edgeIn.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
}.toVector
lazy val releaseIO = (edgesOut zip reacheableIO).map { case (mp, reachable) =>
reachable && edgeIn.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector
def outputPortFn(connectIO: Seq[Boolean]) = {
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectIO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_||_))
}
}
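// outputPortFn builds one address-match predicate per output edge: AddressDecoder picks a
// minimal set of address bits that distinguishes the selected ports, each port's address
// sets are widened under the complement of that mask and unified, and the resulting
// function reports whether a given address falls in that port's (widened) region.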
class TLAToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToAEgress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleA(bundle), edgeIn, slaveToAEgress)(p) with HasAddressDecoder {
has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
lazy val connectAIO = reacheableIO
lazy val requestOH = outputPortFn(connectAIO).zipWithIndex.map { case (o, j) =>
connectAIO(j).B && (unique(connectAIO) || o(protocol.bits.address))
}
q.io.enq <> io.protocol
q.io.enq.bits.source := io.protocol.bits.source | sourceStart.U
}
class TLAFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleA(bundle))(p) {
io.protocol <> protocol
when (io.flit.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLBToNoC(
edgeOut: TLEdge,
edgesIn: Seq[TLEdge],
bundle: TLBundleParameters,
masterToBIngress: Int => Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleB(bundle), edgeOut, masterToBIngress)(p) {
has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
lazy val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
lazy val requestOH = inputIdRanges.map { i => i.contains(protocol.bits.source) }
q.io.enq <> io.protocol
}
class TLBFromNoC(edgeIn: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleB(bundle))(p) {
io.protocol <> protocol
io.protocol.bits.source := trim(protocol.bits.source, sourceSize)
when (io.flit.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLCToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToCEgress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleC(bundle), edgeIn, slaveToCEgress)(p) with HasAddressDecoder {
has_body := edgeIn.hasData(protocol.bits)
lazy val connectCIO = releaseIO
lazy val requestOH = outputPortFn(connectCIO).zipWithIndex.map {
case (o, j) => connectCIO(j).B && (unique(connectCIO) || o(protocol.bits.address))
}
q.io.enq <> io.protocol
q.io.enq.bits.source := io.protocol.bits.source | sourceStart.U
}
class TLCFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleC(bundle))(p) {
io.protocol <> protocol
}
class TLDToNoC(
edgeOut: TLEdge,
edgesIn: Seq[TLEdge],
bundle: TLBundleParameters,
masterToDIngress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleD(bundle), edgeOut, masterToDIngress)(p) {
has_body := edgeOut.hasData(protocol.bits)
lazy val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
lazy val requestOH = inputIdRanges.map { i => i.contains(protocol.bits.source) }
q.io.enq <> io.protocol
q.io.enq.bits.sink := io.protocol.bits.sink | sourceStart.U
}
class TLDFromNoC(edgeIn: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleD(bundle))(p)
{
io.protocol <> protocol
io.protocol.bits.source := trim(protocol.bits.source, sourceSize)
}
class TLEToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToEEgress: Int => Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleE(bundle), edgeIn, slaveToEEgress)(p) {
has_body := edgeIn.hasData(protocol.bits)
lazy val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
lazy val requestOH = outputIdRanges.map { o => o.contains(protocol.bits.sink) }
q.io.enq <> io.protocol
}
class TLEFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleE(bundle))(p) {
io.protocol <> protocol
io.protocol.bits.sink := trim(protocol.bits.sink, sourceSize)
}
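// Rough usage sketch (not part of this file): each adapter pairs one TileLink channel with
// one NoC terminal. Assuming an `edgeIn`, a list of `edgesOut`, shared `bundleParams`, an
// egress-id table `egressIds`, a TileLink port `tlMaster`, and an ingress terminal
// `ingress` exist in the surrounding code, an A channel might be attached roughly as:
//
//   val aToNoC = Module(new TLAToNoC(edgeIn, edgesOut, bundleParams,
//                                    slaveToAEgress = i => egressIds(i), sourceStart = 0))
//   aToNoC.io.protocol <> tlMaster.a
//   ingress.flit <> aToNoC.io.flit
//
// All of these names are placeholders for illustration only.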
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
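// Encoding note: A- and B-channel messages carry data iff opcode(2) is clear, while C- and
// D-channel messages carry data iff opcode(0) is set; E messages never carry data.
// staticHasData collapses this to a compile-time constant whenever the port parameters
// admit only one answer.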
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
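// Worked example: with beatBytes = 16 and a 64-byte data-carrying message (size = 6),
// numBeats1 gives ((1 << 6) - 1) >> 4 = 3, i.e. 4 beats in total. `counter` reads 0 on the
// first beat (`first` asserted), is loaded with 3, and then reads 3 -> 2 -> 1 on the
// remaining beats; `last`/`done` assert on the beat where it reads 1 (or immediately when
// beats1 is 0).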
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
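// needT answers whether an A-channel request needs write (T) permission at its target:
// all Put/atomic opcodes do, Gets and PREFETCH_READ hints do not, PREFETCH_WRITE hints do,
// and Acquires need it exactly when their grow-permissions parameter is NtoT or BtoT.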
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLAToNoC_10( // @[TilelinkAdapters.scala:112:7]
input clock, // @[TilelinkAdapters.scala:112:7]
input reset, // @[TilelinkAdapters.scala:112:7]
output io_protocol_ready, // @[TilelinkAdapters.scala:19:14]
input io_protocol_valid, // @[TilelinkAdapters.scala:19:14]
input [2:0] io_protocol_bits_opcode, // @[TilelinkAdapters.scala:19:14]
input [2:0] io_protocol_bits_param, // @[TilelinkAdapters.scala:19:14]
input [3:0] io_protocol_bits_size, // @[TilelinkAdapters.scala:19:14]
input [6:0] io_protocol_bits_source, // @[TilelinkAdapters.scala:19:14]
input [31:0] io_protocol_bits_address, // @[TilelinkAdapters.scala:19:14]
input [15:0] io_protocol_bits_mask, // @[TilelinkAdapters.scala:19:14]
input [127:0] io_protocol_bits_data, // @[TilelinkAdapters.scala:19:14]
input io_protocol_bits_corrupt, // @[TilelinkAdapters.scala:19:14]
input io_flit_ready, // @[TilelinkAdapters.scala:19:14]
output io_flit_valid, // @[TilelinkAdapters.scala:19:14]
output io_flit_bits_head, // @[TilelinkAdapters.scala:19:14]
output io_flit_bits_tail, // @[TilelinkAdapters.scala:19:14]
output [144:0] io_flit_bits_payload, // @[TilelinkAdapters.scala:19:14]
output [4:0] io_flit_bits_egress_id // @[TilelinkAdapters.scala:19:14]
);
wire [16:0] _GEN; // @[TilelinkAdapters.scala:119:{45,69}]
wire _q_io_deq_valid; // @[TilelinkAdapters.scala:26:17]
wire [2:0] _q_io_deq_bits_opcode; // @[TilelinkAdapters.scala:26:17]
wire [2:0] _q_io_deq_bits_param; // @[TilelinkAdapters.scala:26:17]
wire [3:0] _q_io_deq_bits_size; // @[TilelinkAdapters.scala:26:17]
wire [6:0] _q_io_deq_bits_source; // @[TilelinkAdapters.scala:26:17]
wire [31:0] _q_io_deq_bits_address; // @[TilelinkAdapters.scala:26:17]
wire [15:0] _q_io_deq_bits_mask; // @[TilelinkAdapters.scala:26:17]
wire [127:0] _q_io_deq_bits_data; // @[TilelinkAdapters.scala:26:17]
wire _q_io_deq_bits_corrupt; // @[TilelinkAdapters.scala:26:17]
wire [26:0] _tail_beats1_decode_T = 27'hFFF << _q_io_deq_bits_size; // @[package.scala:243:71]
reg [7:0] head_counter; // @[Edges.scala:229:27]
wire head = head_counter == 8'h0; // @[Edges.scala:229:27, :231:25]
wire [7:0] tail_beats1 = _q_io_deq_bits_opcode[2] ? 8'h0 : ~(_tail_beats1_decode_T[11:4]); // @[package.scala:243:{46,71,76}]
reg [7:0] tail_counter; // @[Edges.scala:229:27]
reg is_body; // @[TilelinkAdapters.scala:39:24]
wire _io_flit_bits_tail_T = _GEN == 17'h0; // @[TilelinkAdapters.scala:119:{45,69}]
wire q_io_deq_ready = io_flit_ready & (is_body | _io_flit_bits_tail_T); // @[TilelinkAdapters.scala:39:24, :41:{35,47}, :119:{45,69}]
wire io_flit_bits_head_0 = head & ~is_body; // @[Edges.scala:231:25]
wire io_flit_bits_tail_0 = (tail_counter == 8'h1 | tail_beats1 == 8'h0) & (is_body | _io_flit_bits_tail_T); // @[Edges.scala:221:14, :229:27, :232:{25,33,43}]
wire [21:0] _GEN_0 = _q_io_deq_bits_address[27:6] ^ 22'h200001; // @[Parameters.scala:137:31]
wire [25:0] _io_flit_bits_egress_id_requestOH_T_35 = _q_io_deq_bits_address[31:6] ^ 26'h2000001; // @[Parameters.scala:137:31]
wire [21:0] _GEN_1 = _q_io_deq_bits_address[27:6] ^ 22'h200002; // @[Parameters.scala:137:31]
wire [25:0] _io_flit_bits_egress_id_requestOH_T_47 = _q_io_deq_bits_address[31:6] ^ 26'h2000002; // @[Parameters.scala:137:31]
wire [21:0] _GEN_2 = _q_io_deq_bits_address[27:6] ^ 22'h200003; // @[Parameters.scala:137:31]
wire [25:0] _io_flit_bits_egress_id_requestOH_T_59 = _q_io_deq_bits_address[31:6] ^ 26'h2000003; // @[Parameters.scala:137:31]
assign _GEN = {~(_q_io_deq_bits_opcode[2]), ~_q_io_deq_bits_mask}; // @[Edges.scala:92:{28,37}]
wire _GEN_3 = io_flit_ready & _q_io_deq_valid; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[TilelinkAdapters.scala:112:7]
if (reset) begin // @[TilelinkAdapters.scala:112:7]
head_counter <= 8'h0; // @[Edges.scala:229:27]
tail_counter <= 8'h0; // @[Edges.scala:229:27]
is_body <= 1'h0; // @[TilelinkAdapters.scala:39:24, :112:7]
end
else begin // @[TilelinkAdapters.scala:112:7]
if (q_io_deq_ready & _q_io_deq_valid) begin // @[Decoupled.scala:51:35]
head_counter <= head ? (_q_io_deq_bits_opcode[2] ? 8'h0 : ~(_tail_beats1_decode_T[11:4])) : head_counter - 8'h1; // @[package.scala:243:{46,71,76}]
tail_counter <= tail_counter == 8'h0 ? tail_beats1 : tail_counter - 8'h1; // @[Edges.scala:221:14, :229:27, :230:28, :231:25, :236:21]
end
is_body <= ~(_GEN_3 & io_flit_bits_tail_0) & (_GEN_3 & io_flit_bits_head_0 | is_body); // @[Decoupled.scala:51:35]
end
  end // always @(posedge)
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
 * A Tile is a purely combinational 2D array of pass-through PEs.
 * in_a is passed through horizontally across the Tile, while in_b, in_d, in_control,
 * in_id, in_last, and in_valid are passed through vertically; each reaches the Tile's
 * corresponding outputs.
 * @param inputType The data type of the 'a' inputs to each PE
 * @param outputType The data type of the 'b'/'d' inputs and of each PE's outputs
 * @param accType The data type of each PE's accumulator
 * @param rows Number of rows of PEs in the Tile
 * @param columns Number of columns of PEs in the Tile
 */
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
// Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
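// Hedged instantiation sketch (not part of the original file): the Tile_203 module emitted
// below is a 1x1 tile of 8-bit inputs and 20-bit outputs. With assumed accumulator and
// dataflow settings it could be produced by something like:
//
//   val tile = Module(new Tile(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.BOTH,
//                              tree_reduction = false, max_simultaneous_matmuls = 5,
//                              rows = 1, columns = 1))
//
// Only the 8-bit input, 20-bit output, and 1x1 shape are visible in the generated Verilog;
// the other parameters are illustrative guesses.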
| module Tile_203( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0, // @[Tile.scala:17:14]
output io_bad_dataflow // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
wire io_bad_dataflow_0; // @[Tile.scala:16:7]
PE_459 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0),
.io_bad_dataflow (io_bad_dataflow_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tilelink.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.soc.{CanAttachToGlobalNoC}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import scala.collection.immutable.{ListMap}
trait TLFieldHelper {
def getBodyFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleB => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleC => Seq( b.data, b.corrupt)
case b: TLBundleD => Seq( b.data, b.corrupt)
case b: TLBundleE => Seq()
}
def getConstFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address )
case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied)
case b: TLBundleE => Seq( b.sink )
}
def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max
def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max
def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits))
}
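// Worked example of minTLPayloadWidth (widths taken from the generated
// TLMasterACDToNoC module further below, shown only for illustration; user/echo
// fields are empty in that instance): with 64-bit data, an 8-byte mask, a 6-bit
// source and a 32-bit address, the body/const sums per channel are
//   A: body = 8 + 64 + 1 = 73, const = 3 + 3 + 4 + 6 + 32 = 48 -> 73
//   C: body = 64 + 1 = 65,     const = 3 + 3 + 4 + 6 + 32 = 48 -> 65
//   D: body = 64 + 1 = 65,     const = 3 + 2 + 4 + 6 + 5 + 1 = 21 -> 65
// so an A/C/D NoC needs max(73, 65, 65) = 73-bit flit payloads, which is why the
// payload ports below are [72:0] and the D payload only occupies bits [64:0].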
class TLMasterToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val a = Decoupled(new IngressFlit(flitWidth))
val b = Flipped(Decoupled(new EgressFlit(flitWidth)))
val c = Decoupled(new IngressFlit(flitWidth))
val d = Flipped(Decoupled(new EgressFlit(flitWidth)))
val e = Decoupled(new IngressFlit(flitWidth))
}
})
val a = Module(new TLAToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0, sourceStart))
val b = Module(new TLBFromNoC(edgeIn, wideBundle, sourceSize))
val c = Module(new TLCToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 1, sourceStart))
val d = Module(new TLDFromNoC(edgeIn, wideBundle, sourceSize))
val e = Module(new TLEToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 2))
a.io.protocol <> io.tilelink.a
io.tilelink.b <> b.io.protocol
c.io.protocol <> io.tilelink.c
io.tilelink.d <> d.io.protocol
e.io.protocol <> io.tilelink.e
io.flits.a <> a.io.flit
b.io.flit <> io.flits.b
io.flits.c <> c.io.flit
d.io.flit <> io.flits.d
io.flits.e <> e.io.flit
}
class TLMasterACDToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val a = Decoupled(new IngressFlit(flitWidth))
val c = Decoupled(new IngressFlit(flitWidth))
val d = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
io.tilelink := DontCare
val a = Module(new TLAToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0, sourceStart))
val c = Module(new TLCToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 1, sourceStart))
val d = Module(new TLDFromNoC(edgeIn, wideBundle, sourceSize))
a.io.protocol <> io.tilelink.a
c.io.protocol <> io.tilelink.c
io.tilelink.d <> d.io.protocol
io.flits.a <> a.io.flit
io.flits.c <> c.io.flit
d.io.flit <> io.flits.d
}
class TLMasterBEToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val b = Flipped(Decoupled(new EgressFlit(flitWidth)))
val e = Decoupled(new IngressFlit(flitWidth))
}
})
io.tilelink := DontCare
val b = Module(new TLBFromNoC(edgeIn, wideBundle, sourceSize))
val e = Module(new TLEToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0))
io.tilelink.b <> b.io.protocol
e.io.protocol <> io.tilelink.e
b.io.flit <> io.flits.b
io.flits.e <> e.io.flit
}
class TLSlaveToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val a = Flipped(Decoupled(new EgressFlit(flitWidth)))
val b = Decoupled(new IngressFlit(flitWidth))
val c = Flipped(Decoupled(new EgressFlit(flitWidth)))
val d = Decoupled(new IngressFlit(flitWidth))
val e = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
val a = Module(new TLAFromNoC(edgeOut, wideBundle))
val b = Module(new TLBToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0))
val c = Module(new TLCFromNoC(edgeOut, wideBundle))
val d = Module(new TLDToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 1, sourceStart))
val e = Module(new TLEFromNoC(edgeOut, wideBundle, sourceSize))
io.tilelink.a <> a.io.protocol
b.io.protocol <> io.tilelink.b
io.tilelink.c <> c.io.protocol
d.io.protocol <> io.tilelink.d
io.tilelink.e <> e.io.protocol
a.io.flit <> io.flits.a
io.flits.b <> b.io.flit
c.io.flit <> io.flits.c
io.flits.d <> d.io.flit
e.io.flit <> io.flits.e
}
class TLSlaveACDToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val a = Flipped(Decoupled(new EgressFlit(flitWidth)))
val c = Flipped(Decoupled(new EgressFlit(flitWidth)))
val d = Decoupled(new IngressFlit(flitWidth))
}
})
io.tilelink := DontCare
val a = Module(new TLAFromNoC(edgeOut, wideBundle))
val c = Module(new TLCFromNoC(edgeOut, wideBundle))
val d = Module(new TLDToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0, sourceStart))
io.tilelink.a <> a.io.protocol
io.tilelink.c <> c.io.protocol
d.io.protocol <> io.tilelink.d
a.io.flit <> io.flits.a
c.io.flit <> io.flits.c
io.flits.d <> d.io.flit
}
class TLSlaveBEToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val b = Decoupled(new IngressFlit(flitWidth))
val e = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
io.tilelink := DontCare
val b = Module(new TLBToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0))
val e = Module(new TLEFromNoC(edgeOut, wideBundle, sourceSize))
b.io.protocol <> io.tilelink.b
io.tilelink.e <> e.io.protocol
io.flits.b <> b.io.flit
e.io.flit <> io.flits.e
}
class TileLinkInterconnectInterface(edgesIn: Seq[TLEdge], edgesOut: Seq[TLEdge])(implicit val p: Parameters) extends Bundle {
val in = MixedVec(edgesIn.map { e => Flipped(new TLBundle(e.bundle)) })
val out = MixedVec(edgesOut.map { e => new TLBundle(e.bundle) })
}
trait TileLinkProtocolParams extends ProtocolParams with TLFieldHelper {
def edgesIn: Seq[TLEdge]
def edgesOut: Seq[TLEdge]
def edgeInNodes: Seq[Int]
def edgeOutNodes: Seq[Int]
require(edgesIn.size == edgeInNodes.size && edgesOut.size == edgeOutNodes.size)
def wideBundle = TLBundleParameters.union(edgesIn.map(_.bundle) ++ edgesOut.map(_.bundle))
def genBundle = new TLBundle(wideBundle)
def inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
def outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
val vNetBlocking = (blocker: Int, blockee: Int) => blocker < blockee
def genIO()(implicit p: Parameters): Data = new TileLinkInterconnectInterface(edgesIn, edgesOut)
}
object TLConnect {
def apply[T <: TLBundleBase](l: DecoupledIO[T], r: DecoupledIO[T]) = {
l.valid := r.valid
r.ready := l.ready
l.bits.squeezeAll.waiveAll :<>= r.bits.squeezeAll.waiveAll
}
}
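// Usage sketch: TLConnect is used throughout the interface() methods below in
// place of a plain <> so that two TLBundles elaborated with different optional
// fields can still be tied together -- valid/ready are wired directly and the
// bits are connected with :<>= after squeezeAll/waiveAll relax width and
// missing-field mismatches, e.g.
//   TLConnect(nif_master.io.tilelink.a, protocol.in(i).a)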
// BEGIN: TileLinkProtocolParams
case class TileLinkABCDEProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]
) extends TileLinkProtocolParams {
// END: TileLinkProtocolParams
val minPayloadWidth = minTLPayloadWidth(new TLBundle(wideBundle))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(3) (u)) ++ edgeOutNodes.map(u => Seq.fill (2) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(2) (u)) ++ edgeOutNodes.map(u => Seq.fill (3) {u})).flatten
val nVirtualNetworks = 5
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val reachable = edgeIn.client.clients.exists { c => edgeOut.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
val probe = edgeIn.client.anySupportProbe && edgeOut.manager.managers.exists(_.regionType >= RegionType.TRACKED)
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (reachable) Some(FlowParams(ii * 3 + 0 , oi * 3 + 0 + edgesIn.size * 2, 4)) else None) ++ // A
(if (probe ) Some(FlowParams(oi * 2 + 0 + edgesIn.size * 3, ii * 2 + 0 , 3)) else None) ++ // B
(if (release ) Some(FlowParams(ii * 3 + 1 , oi * 3 + 1 + edgesIn.size * 2, 2)) else None) ++ // C
(if (reachable) Some(FlowParams(oi * 2 + 1 + edgesIn.size * 3, ii * 2 + 1 , 1)) else None) ++ // D
(if (release ) Some(FlowParams(ii * 3 + 2 , oi * 3 + 2 + edgesIn.size * 2, 0)) else None)) // E
}}.flatten.flatten
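  // Worked example of the terminal numbering above (hypothetical single-master,
  // single-slave network, for illustration): with edgesIn.size = 1 the master
  // owns ingress terminals 0/1/2 (A/C/E) and egress terminals 0/1 (B/D), while
  // the slave owns ingress 3/4 (B/D) and egress 2/3/4 (A/C/E). The flows then
  // resolve to A: 0->2 (vnet 4), B: 3->0 (vnet 3), C: 1->3 (vnet 2),
  // D: 4->1 (vnet 1), E: 2->4 (vnet 0), matching the ingress/egress wiring in
  // interface() below.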
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master = Module(new TLMasterToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 3 + edgesIn.size * 2 + egressOffset,
minPayloadWidth
))
nif_master.io.tilelink := DontCare
nif_master.io.tilelink.a.valid := false.B
nif_master.io.tilelink.c.valid := false.B
nif_master.io.tilelink.e.valid := false.B
TLConnect(nif_master.io.tilelink.a, protocol.in(i).a)
TLConnect(protocol.in(i).d, nif_master.io.tilelink.d)
if (protocol.in(i).params.hasBCE) {
TLConnect(protocol.in(i).b, nif_master.io.tilelink.b)
TLConnect(nif_master.io.tilelink.c, protocol.in(i).c)
TLConnect(nif_master.io.tilelink.e, protocol.in(i).e)
}
ingresses(i * 3 + 0).flit <> nif_master.io.flits.a
ingresses(i * 3 + 1).flit <> nif_master.io.flits.c
ingresses(i * 3 + 2).flit <> nif_master.io.flits.e
nif_master.io.flits.b <> egresses(i * 2 + 0).flit
nif_master.io.flits.d <> egresses(i * 2 + 1).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave = Module(new TLSlaveToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 2 + egressOffset,
minPayloadWidth
))
nif_slave.io.tilelink := DontCare
nif_slave.io.tilelink.b.valid := false.B
nif_slave.io.tilelink.d.valid := false.B
TLConnect(protocol.out(i).a, nif_slave.io.tilelink.a)
TLConnect(nif_slave.io.tilelink.d, protocol.out(i).d)
if (protocol.out(i).params.hasBCE) {
TLConnect(nif_slave.io.tilelink.b, protocol.out(i).b)
TLConnect(protocol.out(i).c, nif_slave.io.tilelink.c)
TLConnect(protocol.out(i).e, nif_slave.io.tilelink.e)
}
ingresses(i * 2 + 0 + edgesIn.size * 3).flit <> nif_slave.io.flits.b
ingresses(i * 2 + 1 + edgesIn.size * 3).flit <> nif_slave.io.flits.d
nif_slave.io.flits.a <> egresses(i * 3 + 0 + edgesIn.size * 2).flit
nif_slave.io.flits.c <> egresses(i * 3 + 1 + edgesIn.size * 2).flit
nif_slave.io.flits.e <> egresses(i * 3 + 2 + edgesIn.size * 2).flit
}
} }
}
}
case class TileLinkACDProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]) extends TileLinkProtocolParams {
val minPayloadWidth = minTLPayloadWidth(Seq(genBundle.a, genBundle.c, genBundle.d).map(_.bits))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(2) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (2) {u})).flatten
val nVirtualNetworks = 3
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val reachable = edgeIn.client.clients.exists { c => edgeOut.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (reachable) Some(FlowParams(ii * 2 + 0 , oi * 2 + 0 + edgesIn.size * 1, 2)) else None) ++ // A
(if (release ) Some(FlowParams(ii * 2 + 1 , oi * 2 + 1 + edgesIn.size * 1, 1)) else None) ++ // C
(if (reachable) Some(FlowParams(oi * 1 + 0 + edgesIn.size * 2, ii * 1 + 0 , 0)) else None)) // D
}}.flatten.flatten
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
protocol := DontCare
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master_acd = Module(new TLMasterACDToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 2 + edgesIn.size * 1 + egressOffset,
minPayloadWidth
))
nif_master_acd.io.tilelink := DontCare
nif_master_acd.io.tilelink.a.valid := false.B
nif_master_acd.io.tilelink.c.valid := false.B
nif_master_acd.io.tilelink.e.valid := false.B
TLConnect(nif_master_acd.io.tilelink.a, protocol.in(i).a)
TLConnect(protocol.in(i).d, nif_master_acd.io.tilelink.d)
if (protocol.in(i).params.hasBCE) {
TLConnect(nif_master_acd.io.tilelink.c, protocol.in(i).c)
}
ingresses(i * 2 + 0).flit <> nif_master_acd.io.flits.a
ingresses(i * 2 + 1).flit <> nif_master_acd.io.flits.c
nif_master_acd.io.flits.d <> egresses(i * 1 + 0).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave_acd = Module(new TLSlaveACDToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 1 + egressOffset,
minPayloadWidth
))
nif_slave_acd.io.tilelink := DontCare
nif_slave_acd.io.tilelink.b.valid := false.B
nif_slave_acd.io.tilelink.d.valid := false.B
TLConnect(protocol.out(i).a, nif_slave_acd.io.tilelink.a)
TLConnect(nif_slave_acd.io.tilelink.d, protocol.out(i).d)
if (protocol.out(i).params.hasBCE) {
TLConnect(protocol.out(i).c, nif_slave_acd.io.tilelink.c)
}
ingresses(i * 1 + 0 + edgesIn.size * 2).flit <> nif_slave_acd.io.flits.d
nif_slave_acd.io.flits.a <> egresses(i * 2 + 0 + edgesIn.size * 1).flit
nif_slave_acd.io.flits.c <> egresses(i * 2 + 1 + edgesIn.size * 1).flit
}
}}
}
}
case class TileLinkBEProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]) extends TileLinkProtocolParams {
val minPayloadWidth = minTLPayloadWidth(Seq(genBundle.b, genBundle.e).map(_.bits))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val nVirtualNetworks = 2
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val probe = edgeIn.client.anySupportProbe && edgeOut.manager.managers.exists(_.regionType >= RegionType.TRACKED)
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (probe ) Some(FlowParams(oi * 1 + 0 + edgesIn.size * 1, ii * 1 + 0 , 1)) else None) ++ // B
(if (release ) Some(FlowParams(ii * 1 + 0 , oi * 1 + 0 + edgesIn.size * 1, 0)) else None)) // E
}}.flatten.flatten
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
protocol := DontCare
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master_be = Module(new TLMasterBEToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 1 + edgesIn.size * 1 + egressOffset,
minPayloadWidth
))
nif_master_be.io.tilelink := DontCare
nif_master_be.io.tilelink.a.valid := false.B
nif_master_be.io.tilelink.c.valid := false.B
nif_master_be.io.tilelink.e.valid := false.B
if (protocol.in(i).params.hasBCE) {
TLConnect(protocol.in(i).b, nif_master_be.io.tilelink.b)
TLConnect(nif_master_be.io.tilelink.e, protocol.in(i).e)
}
ingresses(i * 1 + 0).flit <> nif_master_be.io.flits.e
nif_master_be.io.flits.b <> egresses(i * 1 + 0).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave_be = Module(new TLSlaveBEToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 1 + egressOffset,
minPayloadWidth
))
nif_slave_be.io.tilelink := DontCare
nif_slave_be.io.tilelink.b.valid := false.B
nif_slave_be.io.tilelink.d.valid := false.B
if (protocol.out(i).params.hasBCE) {
TLConnect(protocol.out(i).e, nif_slave_be.io.tilelink.e)
TLConnect(nif_slave_be.io.tilelink.b, protocol.out(i).b)
}
ingresses(i * 1 + 0 + edgesIn.size * 1).flit <> nif_slave_be.io.flits.b
nif_slave_be.io.flits.e <> egresses(i * 1 + 0 + edgesIn.size * 1).flit
}
}}
}
}
abstract class TLNoCLike(implicit p: Parameters) extends LazyModule {
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"TLNoC (data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
// TileLink NoC does not preserve FIFO-ness, masters to this NoC should instantiate FIFOFixers
port.managers map { manager => manager.v1copy(fifoId = None) }
}
)
}
)
}
abstract class TLNoCModuleImp(outer: LazyModule) extends LazyModuleImp(outer) {
val edgesIn: Seq[TLEdge]
val edgesOut: Seq[TLEdge]
val nodeMapping: DiplomaticNetworkNodeMapping
val nocName: String
lazy val inNames = nodeMapping.genUniqueName(edgesIn.map(_.master.masters.map(_.name)))
lazy val outNames = nodeMapping.genUniqueName(edgesOut.map(_.slave.slaves.map(_.name)))
lazy val edgeInNodes = nodeMapping.getNodesIn(inNames)
lazy val edgeOutNodes = nodeMapping.getNodesOut(outNames)
def printNodeMappings(): Unit = {
println(s"Constellation: TLNoC $nocName inwards mapping:")
for ((n, i) <- inNames zip edgeInNodes) {
val node = i.map(_.toString).getOrElse("X")
println(s" $node <- $n")
}
println(s"Constellation: TLNoC $nocName outwards mapping:")
for ((n, i) <- outNames zip edgeOutNodes) {
val node = i.map(_.toString).getOrElse("X")
println(s" $node <- $n")
}
}
}
trait TLNoCParams
// Instantiates a private TLNoC. Replaces the TLXbar
// BEGIN: TLNoCParams
case class SimpleTLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping,
nocParams: NoCParams = NoCParams(),
) extends TLNoCParams
class TLNoC(params: SimpleTLNoCParams, name: String = "test", inlineNoC: Boolean = false)(implicit p: Parameters) extends TLNoCLike {
// END: TLNoCParams
override def shouldBeInlined = inlineNoC
lazy val module = new TLNoCModuleImp(this) {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
printNodeMappings()
val protocolParams = TileLinkABCDEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.nocParams.copy(hasCtrl = false, nocName=name, inlineNoC = inlineNoC),
Seq(protocolParams),
inlineNoC = inlineNoC
)))
noc.io.protocol(0) match {
case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) => l <> r }
(io_out zip protocol.out).foreach { case (l,r) => l <> r }
}
}
}
}
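// Usage sketch (hypothetical names: myNodeMapping and myNoCParams are not
// defined in this file): a TLNoC is dropped in wherever a TLXbar would sit,
// with clients bound to its nexus node on one side and managers on the other:
//   val noc = LazyModule(new TLNoC(SimpleTLNoCParams(myNodeMapping, myNoCParams), name = "sbus"))
//   noc.node := clientNode
//   managerNode := noc.node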
case class SplitACDxBETLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping,
acdNoCParams: NoCParams = NoCParams(),
beNoCParams: NoCParams = NoCParams(),
beDivision: Int = 2
) extends TLNoCParams
class TLSplitACDxBENoC(params: SplitACDxBETLNoCParams, name: String = "test", inlineNoC: Boolean = false)(implicit p: Parameters) extends TLNoCLike {
override def shouldBeInlined = inlineNoC
lazy val module = new TLNoCModuleImp(this) {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
printNodeMappings()
val acdProtocolParams = TileLinkACDProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val beProtocolParams = TileLinkBEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val acd_noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.acdNoCParams.copy(hasCtrl = false, nocName=s"${name}_acd", inlineNoC = inlineNoC),
Seq(acdProtocolParams),
inlineNoC = inlineNoC
)))
val be_noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.beNoCParams.copy(hasCtrl = false, nocName=s"${name}_be", inlineNoC = inlineNoC),
Seq(beProtocolParams),
widthDivision = params.beDivision,
inlineNoC = inlineNoC
)))
acd_noc.io.protocol(0) match { case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) =>
l := DontCare
l.a <> r.a
l.c <> r.c
l.d <> r.d
}
(io_out zip protocol.out).foreach { case (l,r) =>
r := DontCare
l.a <> r.a
l.c <> r.c
l.d <> r.d
}
}}
be_noc.io.protocol(0) match { case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) =>
l := DontCare
l.b <> r.b
l.e <> r.e
}
(io_out zip protocol.out).foreach { case (l,r) =>
r := DontCare
l.b <> r.b
l.e <> r.e
}
}}
}
}
case class GlobalTLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping
) extends TLNoCParams
// Maps this interconnect onto a global NoC
class TLGlobalNoC(params: GlobalTLNoCParams, name: String = "test")(implicit p: Parameters) extends TLNoCLike {
lazy val module = new TLNoCModuleImp(this) with CanAttachToGlobalNoC {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
val protocolParams = TileLinkABCDEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
printNodeMappings()
val io_global = IO(Flipped(protocolParams.genIO()))
io_global match {
case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) => l <> r }
(io_out zip protocol.out).foreach { case (l,r) => l <> r }
}
}
}
}
| module TLMasterACDToNoC_7( // @[Tilelink.scala:72:7]
input clock, // @[Tilelink.scala:72:7]
input reset, // @[Tilelink.scala:72:7]
output io_tilelink_a_ready, // @[Tilelink.scala:79:14]
input io_tilelink_a_valid, // @[Tilelink.scala:79:14]
input [2:0] io_tilelink_a_bits_opcode, // @[Tilelink.scala:79:14]
input [2:0] io_tilelink_a_bits_param, // @[Tilelink.scala:79:14]
input [3:0] io_tilelink_a_bits_size, // @[Tilelink.scala:79:14]
input [5:0] io_tilelink_a_bits_source, // @[Tilelink.scala:79:14]
input [31:0] io_tilelink_a_bits_address, // @[Tilelink.scala:79:14]
input [7:0] io_tilelink_a_bits_mask, // @[Tilelink.scala:79:14]
input [63:0] io_tilelink_a_bits_data, // @[Tilelink.scala:79:14]
input io_tilelink_a_bits_corrupt, // @[Tilelink.scala:79:14]
output io_tilelink_c_ready, // @[Tilelink.scala:79:14]
input io_tilelink_c_valid, // @[Tilelink.scala:79:14]
input [2:0] io_tilelink_c_bits_opcode, // @[Tilelink.scala:79:14]
input [2:0] io_tilelink_c_bits_param, // @[Tilelink.scala:79:14]
input [3:0] io_tilelink_c_bits_size, // @[Tilelink.scala:79:14]
input [5:0] io_tilelink_c_bits_source, // @[Tilelink.scala:79:14]
input [31:0] io_tilelink_c_bits_address, // @[Tilelink.scala:79:14]
input [63:0] io_tilelink_c_bits_data, // @[Tilelink.scala:79:14]
input io_tilelink_c_bits_corrupt, // @[Tilelink.scala:79:14]
input io_tilelink_d_ready, // @[Tilelink.scala:79:14]
output io_tilelink_d_valid, // @[Tilelink.scala:79:14]
output [2:0] io_tilelink_d_bits_opcode, // @[Tilelink.scala:79:14]
output [1:0] io_tilelink_d_bits_param, // @[Tilelink.scala:79:14]
output [3:0] io_tilelink_d_bits_size, // @[Tilelink.scala:79:14]
output [5:0] io_tilelink_d_bits_source, // @[Tilelink.scala:79:14]
output [4:0] io_tilelink_d_bits_sink, // @[Tilelink.scala:79:14]
output io_tilelink_d_bits_denied, // @[Tilelink.scala:79:14]
output [63:0] io_tilelink_d_bits_data, // @[Tilelink.scala:79:14]
output io_tilelink_d_bits_corrupt, // @[Tilelink.scala:79:14]
input io_flits_a_ready, // @[Tilelink.scala:79:14]
output io_flits_a_valid, // @[Tilelink.scala:79:14]
output io_flits_a_bits_head, // @[Tilelink.scala:79:14]
output io_flits_a_bits_tail, // @[Tilelink.scala:79:14]
output [72:0] io_flits_a_bits_payload, // @[Tilelink.scala:79:14]
output [4:0] io_flits_a_bits_egress_id, // @[Tilelink.scala:79:14]
input io_flits_c_ready, // @[Tilelink.scala:79:14]
output io_flits_c_valid, // @[Tilelink.scala:79:14]
output io_flits_c_bits_head, // @[Tilelink.scala:79:14]
output io_flits_c_bits_tail, // @[Tilelink.scala:79:14]
output [72:0] io_flits_c_bits_payload, // @[Tilelink.scala:79:14]
output [4:0] io_flits_c_bits_egress_id, // @[Tilelink.scala:79:14]
output io_flits_d_ready, // @[Tilelink.scala:79:14]
input io_flits_d_valid, // @[Tilelink.scala:79:14]
input io_flits_d_bits_head, // @[Tilelink.scala:79:14]
input io_flits_d_bits_tail, // @[Tilelink.scala:79:14]
input [72:0] io_flits_d_bits_payload // @[Tilelink.scala:79:14]
);
wire [64:0] _c_io_flit_bits_payload; // @[Tilelink.scala:89:17]
TLAToNoC_7 a ( // @[Tilelink.scala:88:17]
.clock (clock),
.reset (reset),
.io_protocol_ready (io_tilelink_a_ready),
.io_protocol_valid (io_tilelink_a_valid),
.io_protocol_bits_opcode (io_tilelink_a_bits_opcode),
.io_protocol_bits_param (io_tilelink_a_bits_param),
.io_protocol_bits_size (io_tilelink_a_bits_size),
.io_protocol_bits_source (io_tilelink_a_bits_source),
.io_protocol_bits_address (io_tilelink_a_bits_address),
.io_protocol_bits_mask (io_tilelink_a_bits_mask),
.io_protocol_bits_data (io_tilelink_a_bits_data),
.io_protocol_bits_corrupt (io_tilelink_a_bits_corrupt),
.io_flit_ready (io_flits_a_ready),
.io_flit_valid (io_flits_a_valid),
.io_flit_bits_head (io_flits_a_bits_head),
.io_flit_bits_tail (io_flits_a_bits_tail),
.io_flit_bits_payload (io_flits_a_bits_payload),
.io_flit_bits_egress_id (io_flits_a_bits_egress_id)
); // @[Tilelink.scala:88:17]
TLCToNoC_7 c ( // @[Tilelink.scala:89:17]
.clock (clock),
.reset (reset),
.io_protocol_ready (io_tilelink_c_ready),
.io_protocol_valid (io_tilelink_c_valid),
.io_protocol_bits_opcode (io_tilelink_c_bits_opcode),
.io_protocol_bits_param (io_tilelink_c_bits_param),
.io_protocol_bits_size (io_tilelink_c_bits_size),
.io_protocol_bits_source (io_tilelink_c_bits_source),
.io_protocol_bits_address (io_tilelink_c_bits_address),
.io_protocol_bits_data (io_tilelink_c_bits_data),
.io_protocol_bits_corrupt (io_tilelink_c_bits_corrupt),
.io_flit_ready (io_flits_c_ready),
.io_flit_valid (io_flits_c_valid),
.io_flit_bits_head (io_flits_c_bits_head),
.io_flit_bits_tail (io_flits_c_bits_tail),
.io_flit_bits_payload (_c_io_flit_bits_payload),
.io_flit_bits_egress_id (io_flits_c_bits_egress_id)
); // @[Tilelink.scala:89:17]
TLDFromNoC_1 d ( // @[Tilelink.scala:90:17]
.clock (clock),
.reset (reset),
.io_protocol_ready (io_tilelink_d_ready),
.io_protocol_valid (io_tilelink_d_valid),
.io_protocol_bits_opcode (io_tilelink_d_bits_opcode),
.io_protocol_bits_param (io_tilelink_d_bits_param),
.io_protocol_bits_size (io_tilelink_d_bits_size),
.io_protocol_bits_source (io_tilelink_d_bits_source),
.io_protocol_bits_sink (io_tilelink_d_bits_sink),
.io_protocol_bits_denied (io_tilelink_d_bits_denied),
.io_protocol_bits_data (io_tilelink_d_bits_data),
.io_protocol_bits_corrupt (io_tilelink_d_bits_corrupt),
.io_flit_ready (io_flits_d_ready),
.io_flit_valid (io_flits_d_valid),
.io_flit_bits_head (io_flits_d_bits_head),
.io_flit_bits_tail (io_flits_d_bits_tail),
.io_flit_bits_payload (io_flits_d_bits_payload[64:0]) // @[Tilelink.scala:97:14]
); // @[Tilelink.scala:90:17]
assign io_flits_c_bits_payload = {8'h0, _c_io_flit_bits_payload}; // @[Tilelink.scala:72:7, :89:17, :96:14]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
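// Usage sketch (signal names are illustrative): delay a Bool by two cycles with
// both stages reset to false and named "sync_out_1"/"sync_out_0":
//   val delayed = ShiftRegInit(io.d, n = 2, init = false.B, name = Some("sync_out"))
// The foldRight builds the chain from the input inward, so the value returned is
// the final stage ("sync_out_0").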
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
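// Usage sketch (signal names are illustrative): add a 3-deep, asynchronously
// reset pipeline on a data bus, initialised to zero:
//   val piped = AsyncResetShiftReg(io.bits, depth = 3, init = 0, name = Some("out_pipe"))
// Each stage is a full-width AsyncResetRegVec with its enable tied high, so this
// is purely a timing aid rather than a synchronizer.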
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
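// Usage sketch (signal names are illustrative): bring an asynchronous Bool into
// this clock domain through a 3-deep chain of async-reset flops per bit:
//   val level_s = AsyncResetSynchronizerShiftReg(async_level, sync = 3, init = 0, name = Some("level_sync"))
// Unlike AsyncResetShiftReg, every bit gets its own 1-bit-wide primitive chain so
// a backend flow can swap in metastability-hardened cells.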
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
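// Usage sketch (signal names are illustrative): capture a bundle into the
// destination clock domain only when 'fire' is asserted, with no reset value:
//   val captured = ClockCrossingReg(io.bits, en = fire, doInit = false, name = Some("cdc_capture"))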
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_251( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
end // always @(posedge, posedge)
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
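  // Worked example (hypothetical client set, for illustration): if a client owns
  // source IDs [0,4) and declares visibility only over 0x8000_0000-0xFFFF_FFFF,
  // then visible(address, source, edge) fails exactly when source is in [0,4)
  // and address lies outside that window; clients whose sourceId does not
  // contain the source impose no constraint on the address.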
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
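// Editor's note (added comment): inflight_opcodes/inflight_sizes are flat bit vectors holding one
// (value << 1 | 1) field per source ID; the low "1" marks the entry as set, and the lookups below
// shift the field selected by d.source back down and drop that bit to recover the saved opcode/size.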
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
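// Editor's sketch (not part of the upstream Misc.scala): a minimal example of how DecoupledHelper
// is typically used, with hypothetical signal names. Every Bool handed to the helper must be true
// for a transfer; fire(x) excludes x itself, which is how a ready or valid output is driven without
// a combinational loop through the very signal being generated.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
    val credit_ok = Input(Bool()) // hypothetical extra gating condition
  })
  val xfer = DecoupledHelper(io.in.valid, io.out.ready, io.credit_ok)
  io.out.bits := io.in.bits
  io.out.valid := xfer.fire(io.out.ready) // everything except out.ready
  io.in.ready := xfer.fire(io.in.valid) // everything except in.valid
}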
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
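// Editor's sketch (not part of the upstream Misc.scala): using MuxTLookup to search a key and
// return a pair of values in one shot. The opcode encodings below are invented for the
// illustration, and the helper is meant to be called from inside a Module.
object MuxTLookupExample {
  def decode(op: UInt): (UInt, Bool) =
    MuxTLookup(op, (0.U(3.W), false.B), Seq(
      1.U -> (1.U(3.W), false.B), // hypothetical 1-byte unsigned access
      2.U -> (2.U(3.W), true.B), // hypothetical 2-byte signed access
      3.U -> (4.U(3.W), true.B))) // hypothetical 4-byte signed access
}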
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; e.g. groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
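// Editor's sketch (not part of the upstream Misc.scala): a tiny wrapper that elaborates MaskGen for
// a 4-byte beat so the examples in the comment above can be exercised in simulation. Bit i of
// io.mask corresponds to byte lane i; the port names are invented for the illustration.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr_lo = Input(UInt(2.W)) // low address bits within the beat
    val lgSize = Input(UInt(2.W)) // log2 of the access size in bytes
    val mask = Output(UInt(4.W)) // byte-lane mask
  })
  io.mask := MaskGen(io.addr_lo, io.lgSize, beatBytes = 4)
}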
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
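// Editor's sketch (not part of the upstream PlusArg.scala): how PlusArg and PlusArg.timeout are
// typically consumed from a module. The plusarg names and the cycle-counter logic are invented for
// the illustration; with no +args on the simulator command line both default to 0 and stay inert.
class PlusArgUsageExample extends Module {
  val cycle = RegInit(0.U(32.W))
  cycle := cycle + 1.U
  // Kills the simulation once the counter reaches +max_cycles=<N> (never fires when N is 0).
  PlusArg.timeout("max_cycles", default = 0, docstring = "Stop after this many cycles")(cycle)
  // Reads +report_cycle=<N> as a runtime-configurable constant.
  val report = PlusArg("report_cycle", default = 0, docstring = "Cycle at which to print a marker")
  when (report =/= 0.U && cycle === report) {
    printf("PlusArgUsageExample: reached +report_cycle\n")
  }
}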
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
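// Editor's addition (worked examples, not in the upstream file): with the helpers above,
// "FooBar Baz".underscore == "foo_bar_baz", "FooBar Baz".kebab == "foobar-baz", and
// "bus".named(Some("fbus")) == "bus_named_fbus".
def stringHelpersExample: (String, String, String) =
  ("FooBar Baz".underscore, "FooBar Baz".kebab, "bus".named(Some("fbus")))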
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
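// Editor's sketch (not in the upstream file): a couple of the UInt helpers above in use, meant to
// be elaborated inside a Module. For a 4-bit x, x.rotateRight(1) moves bit 0 up to bit 3, and
// x.addWrap(y, 10) gives (x + y) % 10 provided both inputs are already < 10.
def uintHelpersExample(x: UInt, y: UInt): (UInt, UInt) =
  (x.rotateRight(1), x.addWrap(y, 10))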
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
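// Editor's addition (worked example, not in the upstream file): for x = b00100,
// leftOR(x) = b11100 (each set bit smears toward the MSB) and rightOR(x) = b00111
// (each set bit smears toward the LSB); both are handy for building priority masks.
def orFillExample(x: UInt): (UInt, UInt) = (leftOR(x), rightOR(x))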
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
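// Editor's addition (worked example, not in the upstream file): unlike Seq.groupBy, the result
// order follows the first appearance of each key, so
// groupByIntoSeq(Seq((0, "a"), (1, "b"), (0, "c")))(_._1)
// always yields Seq(0 -> Seq((0, "a"), (0, "c")), 1 -> Seq((1, "b"))).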
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent holds a read-only copy of the block (it lies outwards of the Trunk).
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
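// --- Illustrative sketch (not part of the original file) ---------------------
// The grow (A-channel), cap (B/D-channel) and shrink/report (C-channel) params
// use separate encodings; the is* helpers above can guard them. The module and
// port names are assumptions for the sketch.
class PermParamCheckSketch extends Module {
  val io = IO(new Bundle {
    val acquireParam = Input(UInt(TLPermissions.aWidth.W))  // on AcquireBlock/AcquirePerm
    val grantParam   = Input(UInt(TLPermissions.bdWidth.W)) // on Grant/GrantData
    val releaseParam = Input(UInt(TLPermissions.cWidth.W))  // on Release/ReleaseData
  })
  assert(TLPermissions.isGrow(io.acquireParam))
  assert(TLPermissions.isCap(io.grantParam))
  assert(TLPermissions.isReport(io.releaseParam)) // report encodings (0-5) also cover the shrink encodings (0-2)
}
// -----------------------------------------------------------------------------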
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
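// --- Illustrative sketch (not part of the original file) ---------------------
// Assuming bundle parameters supplied from elsewhere, a TLBundle port can be
// created directly and parked in an inactive state with tieoff(), which
// inspects signal directions and deasserts only this side's ready/valid
// outputs. The module name is an assumption; `<> DontCare` is used here only
// to give the remaining output fields a default.
class TLPortStubSketch(params: TLBundleParameters) extends Module {
  val tl = IO(new TLBundle(params)) // a/d always present; b/c/e only when params.hasBCE
  tl <> DontCare
  tl.tieoff()
}
// -----------------------------------------------------------------------------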
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, s"Id range end ($end) cannot be less than start ($start).")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
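// --- Illustrative sketch (not part of the original file) ---------------------
// IdRange is a half-open range of ids, so [0,4) and [4,8) abut without
// overlapping and [0,8) contains both. The object name is an assumption.
object IdRangeSketch extends App {
  val lo  = IdRange(0, 4)
  val hi  = IdRange(4, 8)
  val all = IdRange(0, 8)
  require(!(lo overlaps hi))
  require(all.contains(lo) && all.contains(hi))
  require(lo.shift(4) == hi)
}
// -----------------------------------------------------------------------------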
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
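// --- Illustrative sketch (not part of the original file) ---------------------
// TransferSizes is an inclusive range of power-of-two sizes in bytes;
// intersect narrows to the common sizes and mincover widens to span both.
// The object name is an assumption.
object TransferSizesSketch extends App {
  val a = TransferSizes(4, 64)
  val b = TransferSizes(16, 256)
  require(a.contains(32) && !a.contains(128))
  require(a.intersect(b) == TransferSizes(16, 64))
  require(a.mincover(b)  == TransferSizes(4, 256))
}
// -----------------------------------------------------------------------------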
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
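// --- Illustrative sketch (not part of the original file) ---------------------
// AddressSet matches addresses by base/mask as documented above, and
// AddressSet.misaligned splits an arbitrary (base, size) region into properly
// aligned sets. The object name is an assumption.
object AddressSetSketch extends App {
  val dev = AddressSet(0x1000, 0xf0f) // 0x1000-0x100f, 0x1100-0x110f, ...
  require(dev.contains(BigInt(0x1103)))
  require(!dev.contains(BigInt(0x1010)))
  // Covers [0x3000, 0x4800) with two aligned sets:
  require(AddressSet.misaligned(0x3000, 0x1800) ==
          Seq(AddressSet(0x3000, 0xfff), AddressSet(0x4000, 0x7ff)))
}
// -----------------------------------------------------------------------------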
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
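// --- Illustrative sketch (not part of the original file) ---------------------
// A BufferParams value can be applied directly to a DecoupledIO to insert
// (or elide, when depth is 0) a queue. The module and port names are
// assumptions for the sketch.
import chisel3.util.Decoupled
class BufferedSketch(buf: BufferParams) extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  io.out <> buf(io.in) // Queue(io.in, depth, flow, pipe) when depth > 0, else io.in itself
}
// e.g. new BufferedSketch(BufferParams.default) for a depth-2 queue
// -----------------------------------------------------------------------------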
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
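// --- Illustrative sketch (not part of the original file) ---------------------
// The first/last/done/count helpers above track multibeat bursts. A minimal
// tracker over an A channel might look like this, assuming `edge` is supplied
// by a surrounding diplomatic node; the module and port names are assumptions.
class ABeatTrackerSketch(edge: TLEdge) extends Module {
  val io = IO(new Bundle {
    val a         = Flipped(Decoupled(new TLBundleA(edge.bundle)))
    val burstBusy = Output(Bool())
  })
  io.a.ready := true.B // sketch only: always accept
  val (first, last, done, count) = edge.count(io.a.bits, io.a.fire)
  io.burstBusy := !first // high on the non-first beats of a multibeat message
}
// -----------------------------------------------------------------------------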
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
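// --- Illustrative sketch (not part of the original file) ---------------------
// The request constructors above return a (legal, bits) pair, so a client can
// both build the beat and gate it on the fast legality check. A hypothetical
// single-source Get issuer, with `edge` assumed to come from the client node:
class GetIssuerSketch(edge: TLEdgeOut) extends Module {
  val io = IO(new Bundle {
    val go   = Input(Bool())
    val addr = Input(UInt(edge.bundle.addressBits.W))
    val a    = Decoupled(new TLBundleA(edge.bundle))
  })
  val (legal, get) = edge.Get(fromSource = 0.U, toAddress = io.addr, lgSize = 3.U) // 8-byte Get
  io.a.valid := io.go && legal
  io.a.bits  := get
}
// -----------------------------------------------------------------------------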
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
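// --- Illustrative sketch (not part of the original file) ---------------------
// On the manager side the same pattern applies for responses: a hypothetical
// single-beat responder that answers every A-channel Get with AccessAckData
// carrying a constant payload (flow control deliberately simplistic). `edge`
// is assumed to come from a manager node's `in` binding.
class ConstResponderSketch(edge: TLEdgeIn) extends Module {
  val io = IO(new Bundle {
    val a = Flipped(Decoupled(new TLBundleA(edge.bundle)))
    val d = Decoupled(new TLBundleD(edge.bundle))
  })
  io.a.ready := io.d.ready
  io.d.valid := io.a.valid
  io.d.bits  := edge.AccessAck(io.a.bits, "hDEADBEEF".U(edge.bundle.dataBits.W))
}
// -----------------------------------------------------------------------------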
| module TLMonitor_22( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27]
wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_54 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_56 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_60 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_62 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_66 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_68 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_72 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_74 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_78 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_80 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_84 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_86 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_90 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_92 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28]
wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1027:0] _c_sizes_set_T_1 = 1028'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59]
wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40]
wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [519:0] c_sizes_set = 520'h0; // @[Monitor.scala:741:34]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
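  // End of the constant-folded bookkeeping above (tied-off C-channel wires, response
  // maps, and lookup constants); the live A/D-channel monitor logic follows.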
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_66 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_67 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_68 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_69 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_70 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_71 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_72 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_73 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_74 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_75 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_76 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_12 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_13 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
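  // A-channel source legality: the request's source ID must fall in one of the
  // client ID ranges (fixed IDs plus the aligned ranges checked below).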
wire _source_ok_T = io_in_a_bits_source_0 == 7'h30; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h9; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'hB; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_31 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_37 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_26 = _source_ok_T_25 == 4'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_32 = _source_ok_T_31 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_6 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_38 = _source_ok_T_37 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_7 = _source_ok_T_42; // @[Parameters.scala:1138:31]
wire _source_ok_T_43 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_43; // @[Parameters.scala:1138:31]
wire _source_ok_T_44 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_50 = _source_ok_T_49 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_50 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
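  // Address alignment: the low address bits covered by the transfer size must be zero.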
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [28:0] _is_aligned_T = {17'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}]
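  // Byte-lane mask: expand address and size into the 8-bit per-byte mask of the
  // 64-bit data beat.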
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
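  // uncommonBits: low bits of the source ID within each candidate ID range,
  // extracted for the per-range checks.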
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_5 = _uncommonBits_T_5[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_6 = _uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_11 = _uncommonBits_T_11[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_12 = _uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_13 = _uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_18 = _uncommonBits_T_18[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_20 = _uncommonBits_T_20[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_25 = _uncommonBits_T_25[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_26 = _uncommonBits_T_26[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_27 = _uncommonBits_T_27[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_32 = _uncommonBits_T_32[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_33 = _uncommonBits_T_33[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_39 = _uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_40 = _uncommonBits_T_40[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_41 = _uncommonBits_T_41[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_46 = _uncommonBits_T_46[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_47 = _uncommonBits_T_47[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_48 = _uncommonBits_T_48[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_53 = _uncommonBits_T_53[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_55 = _uncommonBits_T_55[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_58 = _uncommonBits_T_58[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_59 = _uncommonBits_T_59[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_60 = _uncommonBits_T_60[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_61 = _uncommonBits_T_61[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_62 = _uncommonBits_T_62[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_64 = _uncommonBits_T_64[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_65 = _uncommonBits_T_65[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_66 = _uncommonBits_T_66[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_67 = _uncommonBits_T_67[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_68 = _uncommonBits_T_68[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_69 = _uncommonBits_T_69[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_70 = _uncommonBits_T_70[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_71 = _uncommonBits_T_71[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_72 = _uncommonBits_T_72[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_73 = _uncommonBits_T_73[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_74 = _uncommonBits_T_74[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_75 = _uncommonBits_T_75[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_76 = _uncommonBits_T_76[2:0]; // @[Parameters.scala:52:{29,56}]
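  // D-channel source legality: same ID-range check as above, applied to the response's source.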
wire _source_ok_T_51 = io_in_d_bits_source_0 == 7'h30; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_51; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_52 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_58 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_64 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_70 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_53 = _source_ok_T_52 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_55 = _source_ok_T_53; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_57 = _source_ok_T_55; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_57; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_59 = _source_ok_T_58 == 5'h9; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_61 = _source_ok_T_59; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_63 = _source_ok_T_61; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_63; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_65 = _source_ok_T_64 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_67 = _source_ok_T_65; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_69 = _source_ok_T_67; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_69; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_71 = _source_ok_T_70 == 5'hB; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_73 = _source_ok_T_71; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_75 = _source_ok_T_73; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_75; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_76 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_82 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_88 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_77 = _source_ok_T_76 == 4'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_79 = _source_ok_T_77; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_81 = _source_ok_T_79; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_5 = _source_ok_T_81; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_12 = _source_ok_uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_83 = _source_ok_T_82 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_85 = _source_ok_T_83; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_87 = _source_ok_T_85; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_6 = _source_ok_T_87; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_13 = _source_ok_uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_89 = _source_ok_T_88 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_91 = _source_ok_T_89; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_93 = _source_ok_T_91; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_7 = _source_ok_T_93; // @[Parameters.scala:1138:31]
wire _source_ok_T_94 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_94; // @[Parameters.scala:1138:31]
wire _source_ok_T_95 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_100 = _source_ok_T_99 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_101 = _source_ok_T_100 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_101 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
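  // A-channel beat tracking: a_first_counter counts down the beats of a burst so
  // a_first marks the first beat of each new request.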
wire _T_1720 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1720; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1720; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
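  // D-channel beat tracking, mirroring the A-channel counter above.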
wire _T_1793 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1793; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1793; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1793; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [519:0] inflight_sizes; // @[Monitor.scala:618:33]
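  // Inflight bookkeeping: one bit per source ID, plus a 4-bit opcode slot and an
  // 8-bit size slot per source, for requests issued but not yet answered.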
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
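  // On an accepted first A beat, set the source's inflight bit and latch its
  // opcode and size into the per-source slots.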
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [519:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [9:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99]
wire [519:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [519:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}]
wire [519:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[519:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_3 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_3; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1646 = _T_1720 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1646 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1646 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1646 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1646 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [9:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [1027:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1646 ? _a_sizes_set_T_1[519:0] : 520'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [519:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1692 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1692 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1661 = _T_1793 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1661 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1661 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1661 ? _d_sizes_clr_T_5[519:0] : 520'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [519:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [519:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [519:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [519:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [519:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [519:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [519:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}]
wire [519:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[519:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [519:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1764 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1764 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1746 = _T_1793 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1746 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1746 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1746 ? _d_sizes_clr_T_11[519:0] : 520'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [519:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [519:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister, but allows the user to suggest a
// name for the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
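// --- Illustrative usage sketch (added for clarity; not part of the original file) ---
// A minimal example, assuming a two-deep pipeline is wanted: ShiftRegInit builds plain
// registers with suggested names, while AsyncResetShiftReg builds the same depth out of
// asynchronously reset register modules. The module name `ShiftRegUsageExample` and its
// ports are hypothetical.
class ShiftRegUsageExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Two synchronous pipeline stages, reset to false, named sync_0 and sync_1.
  val piped = ShiftRegInit(io.d, n = 2, init = false.B, name = Some("sync"))
  // The same signal re-registered through two asynchronously reset stages.
  io.q := AsyncResetShiftReg(piped, depth = 2, init = 0, name = Some("async_pipe"))
}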
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
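// --- Illustrative sketch (added for clarity; not part of the original file) ---
// Shows how the derived fields fall out of the parameters above; the value names are hypothetical.
object AsyncQueueParamsExamples {
  val deep = AsyncQueueParams(depth = 8, sync = 3) // bits = 3, wires = 8: the whole mem vector crosses
  val narrowXing = AsyncQueueParams(depth = 4, narrow = true) // bits = 2, wires = 1: read mux on the source side
  val single = AsyncQueueParams.singleton(sync = 2) // depth = 1, bits = 0, safe by default
}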
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
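// --- Illustrative sketch (added for clarity; not part of the original file) ---
// A plain-Scala model of the binary-to-Gray conversion performed above. Consecutive Gray
// codes differ in exactly one bit, which is what makes the write/read pointers safe to
// synchronize bit-by-bit across the clock crossing. The object name is hypothetical.
object GrayCodeModel {
  def toGray(binary: Int): Int = binary ^ (binary >>> 1)
  // For example: (0 to 4).map(toGray) == Seq(0, 1, 3, 2, 6)
}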
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because a dequeue can occur on the receiving side and
// reset is then allowed to happen, but the write side cannot know that the
// dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denotes "don't reset when there is activity"
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that the bits latch when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denotes "don't reset when there is activity"
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
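// --- Illustrative usage sketch (added for clarity; not part of the original file) ---
// Instantiates an AsyncQueue from the implicit (enqueue) clock domain to an externally
// supplied dequeue clock/reset. The module name, port names, and depth are hypothetical.
class AsyncQueueUsageExample extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(UInt(8.W)))
    val deq_clock = Input(Clock())
    val deq_reset = Input(Bool())
    val deq = Decoupled(UInt(8.W))
  })
  val crossing = Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 4)))
  // Enqueue side runs in this module's implicit clock domain.
  crossing.io.enq_clock := clock
  crossing.io.enq_reset := reset.asBool
  crossing.io.enq <> io.enq
  // Dequeue side runs in the externally supplied clock domain.
  crossing.io.deq_clock := io.deq_clock
  crossing.io.deq_reset := io.deq_reset
  io.deq <> crossing.io.deq
}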
| module AsyncValidSync_77( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_87 io_out_sink_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
// The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
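// Illustrative note (added for clarity): the watchdog limit above is read from a runtime
// plusarg, so passing something like +tilelink_timeout=100000 to the simulator enables the
// timeout check, while the default of 0 leaves it disabled. The exact value is hypothetical.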
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
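// Worked example (added for clarity): size_to_numfullbits(3.U) = (1 << 3) - 1 = 7 = 0b111.
// Each in-flight A-channel opcode is stored below as (opcode << 1) | 1, so a stored field of
// zero always means "no request outstanding for this source"; the lookups mask with
// size_to_numfullbits(...) and shift right by one to recover the original opcode/size.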
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
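// Illustrative sketch (not part of the original utilities): a hypothetical
// one-to-two fan-out where each Decoupled endpoint fires only when all of the
// others are ready/valid. All port names here are assumptions for the example.
class DecoupledHelperFanOut extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val outA = Decoupled(UInt(8.W))
    val outB = Decoupled(UInt(8.W))
  })
  val helper = DecoupledHelper(io.in.valid, io.outA.ready, io.outB.ready)
  io.in.ready := helper.fire(io.in.valid) // AND of everything except in.valid
  io.outA.valid := helper.fire(io.outA.ready)
  io.outB.valid := helper.fire(io.outB.ready)
  io.outA.bits := io.in.bits
  io.outB.bits := io.in.bits
}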
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
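// Illustrative sketch: look up a (valid, data) pair by key with a default for
// unmatched keys. The key values and widths below are assumptions.
object MuxTLookupExample {
  def decode(key: UInt): (Bool, UInt) =
    MuxTLookup(key, (false.B, 0.U(8.W)), Seq(
      1.U -> (true.B, 0x10.U(8.W)),
      2.U -> (true.B, 0x20.U(8.W))))
}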
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
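// Illustrative sketch: carving an address into (tag, index, offset) fields at
// assumed bit positions 12 and 6.
object SplitExample {
  def fields(addr: UInt): (UInt, UInt, UInt) = Split(addr, 12, 6)
}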
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
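// Illustrative sketch: a two-of-three vote over assumed redundant status bits.
object MajorityExample {
  def vote(a: Bool, b: Bool, c: Bool): Bool = Majority(Seq(a, b, c))
}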
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
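// Illustrative sketch: generating the byte-lane mask for one beat of an assumed
// 8-byte-wide bus, given the low address bits and log2 of the access size.
object MaskGenExample {
  def beatMask(addrLo: UInt, lgSize: UInt): UInt = MaskGen(addrLo, lgSize, beatBytes = 8)
}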
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
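// Illustrative sketch: reading an assumed "+verbose=1" plusarg and bounding an
// assumed cycle counter with PlusArg.timeout. All names here are examples only.
class PlusArgExample extends Module {
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  val verbose = PlusArg("verbose", default = 0, docstring = "Enable extra printing")
  when (verbose =/= 0.U) { printf("cycle %d\n", cycles) }
  PlusArg.timeout("example_timeout", docstring = "Kill the sim after this many cycles")(cycles)
}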
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
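// Illustrative worked example: unlike groupBy, the result order follows the
// first appearance of each key, so (with assumed (name, width) pairs)
//   groupByIntoSeq(Seq("a" -> 1, "b" -> 2, "c" -> 1))(_._2)
// returns Seq(1 -> Seq("a" -> 1, "c" -> 1), 2 -> Seq("b" -> 2)).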
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
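// Illustrative sketch: using the adResponse table above to compute the expected
// D-channel opcode for a given A-channel request opcode.
object TLMessagesExample {
  def expectedDOpcode(aOpcode: UInt): UInt = TLMessages.adResponse(aOpcode)
}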
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on the inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the Trunk and may hold a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
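// Illustrative sketch: classifying an assumed B/D-channel cap param using the
// helpers above (toT keeps write permission, toB/toN do not).
object TLPermissionsExample {
  def capsBelowTrunk(bdParam: UInt): Bool =
    bdParam === TLPermissions.toB || bdParam === TLPermissions.toN
}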
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, s"Id range [$start, $end) cannot have negative size.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
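// Illustrative sketch: a hardware membership test against an assumed source-ID
// window [4, 8).
object IdRangeExample {
  val window = IdRange(4, 8)
  def inWindow(source: UInt): Bool = window.contains(source)
}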
// A potentially empty inclusive range of power-of-2 transfer sizes [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
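// Illustrative sketch: intersecting the transfer sizes two assumed agents support.
object TransferSizesExample {
  val master = TransferSizes(1, 64)
  val slave = TransferSizes(4, 32)
  val legal = master intersect slave // TransferSizes(4, 32)
}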
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
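// Illustrative sketch, following the base/mask examples in the comment above:
// an assumed 4 KiB device at 0x1000 and a hardware hit check against it.
object AddressSetExample {
  val device = AddressSet(0x1000, 0xfff)
  def hits(addr: UInt): Bool = device.contains(addr)
}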
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
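// Illustrative sketch: inserting an assumed depth-2 queue on a Decoupled stream
// via BufferParams.default, versus passing it through with BufferParams.none.
object BufferParamsExample {
  def buffered[T <: Data](in: DecoupledIO[T]): DecoupledIO[T] = BufferParams.default(in)
  def passthrough[T <: Data](in: DecoupledIO[T]): DecoupledIO[T] = BufferParams.none(in)
}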
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
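Typical use of the beat-tracking helpers above, as a sketch inside a module that already has a TLEdge `edge` and an A channel `a: DecoupledIO[TLBundleA]` in scope (both assumed here):
// Counts completed A-channel messages rather than raw beats:
// `a_done` pulses only on the accepted last beat of each (possibly multibeat) message.
val (a_first, a_last, a_done) = edge.firstlast(a)
val msgCount = RegInit(0.U(32.W))
when (a_done) { msgCount := msgCount + 1.U }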
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
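A hedged client-side sketch of the constructors above: issue an 8-byte Get on the A channel and unconditionally accept the D-channel response (assumes a `node: TLClientNode` with one out edge and a request strobe `wantRead`; both are placeholders, and flow-control/error handling is omitted):
val (tl, edge) = node.out.head                 // (TLBundle, TLEdgeOut)
val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
tl.a.valid := wantRead && legal                // only present legal requests
tl.a.bits  := getBits
tl.d.ready := true.B                           // always drain AccessAckData responses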
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
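On the manager side, TLEdgeIn builds the D-channel responses. A minimal single-beat sketch that answers Gets with AccessAckData and writes with a plain AccessAck (assumes a `node: TLManagerNode` with one in edge and a read value `regData`; combinational response, no bursts, no Hint/atomic or error handling):
val (tl, edge) = node.in.head                  // (TLBundle, TLEdgeIn)
val isGet = tl.a.bits.opcode === TLMessages.Get
tl.a.ready := tl.d.ready                       // single-beat, combinational handshake
tl.d.valid := tl.a.valid
tl.d.bits  := Mux(isGet,
  edge.AccessAck(tl.a.bits, regData),          // AccessAckData carrying the read data
  edge.AccessAck(tl.a.bits))                   // plain AccessAck for writes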
module TLMonitor_64( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_61 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_67 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_85 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_wo_ready_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_wo_ready_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [2050:0] _c_opcodes_set_T_1 = 2051'h0; // @[Monitor.scala:767:54]
wire [2050:0] _c_sizes_set_T_1 = 2051'h0; // @[Monitor.scala:768:52]
wire [10:0] _c_opcodes_set_T = 11'h0; // @[Monitor.scala:767:79]
wire [10:0] _c_sizes_set_T = 11'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [255:0] _c_set_wo_ready_T = 256'h1; // @[OneHot.scala:58:35]
wire [255:0] _c_set_T = 256'h1; // @[OneHot.scala:58:35]
wire [515:0] c_opcodes_set = 516'h0; // @[Monitor.scala:740:34]
wire [515:0] c_sizes_set = 516'h0; // @[Monitor.scala:741:34]
wire [128:0] c_set = 129'h0; // @[Monitor.scala:738:34]
wire [128:0] c_set_wo_ready = 129'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [7:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 8'h30; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_1 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_7 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_13 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_19 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 6'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 6'h9; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 6'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 6'hB; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [3:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[7:4]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_31 = io_in_a_bits_source_0[7:4]; // @[Monitor.scala:36:7]
wire _source_ok_T_26 = _source_ok_T_25 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire [3:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[3:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_32 = _source_ok_T_31 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_6 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire _source_ok_T_37 = io_in_a_bits_source_0 == 8'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_37; // @[Parameters.scala:1138:31]
wire _source_ok_T_38 = io_in_a_bits_source_0 == 8'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31]
wire _source_ok_T_39 = io_in_a_bits_source_0 == 8'h42; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31]
wire _source_ok_T_40 = io_in_a_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_10 = _source_ok_T_40; // @[Parameters.scala:1138:31]
wire _source_ok_T_41 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_49 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_4 = _uncommonBits_T_4[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_5 = _uncommonBits_T_5[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_10 = _uncommonBits_T_10[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_11 = _uncommonBits_T_11[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_16 = _uncommonBits_T_16[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_17 = _uncommonBits_T_17[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_22 = _uncommonBits_T_22[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_23 = _uncommonBits_T_23[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_28 = _uncommonBits_T_28[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_29 = _uncommonBits_T_29[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_34 = _uncommonBits_T_34[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_35 = _uncommonBits_T_35[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_40 = _uncommonBits_T_40[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_41 = _uncommonBits_T_41[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_46 = _uncommonBits_T_46[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_47 = _uncommonBits_T_47[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_52 = _uncommonBits_T_52[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_53 = _uncommonBits_T_53[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_58 = _uncommonBits_T_58[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_59 = _uncommonBits_T_59[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_64 = _uncommonBits_T_64[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_65 = _uncommonBits_T_65[3:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_50 = io_in_d_bits_source_0 == 8'h30; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_50; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_51 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_57 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_63 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_69 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_52 = _source_ok_T_51 == 6'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_56; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_58 = _source_ok_T_57 == 6'h9; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_62 = _source_ok_T_60; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_62; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_64 = _source_ok_T_63 == 6'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_68 = _source_ok_T_66; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_68; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_70 = _source_ok_T_69 == 6'hB; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_74; // @[Parameters.scala:1138:31]
wire [3:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_75 = io_in_d_bits_source_0[7:4]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_81 = io_in_d_bits_source_0[7:4]; // @[Monitor.scala:36:7]
wire _source_ok_T_76 = _source_ok_T_75 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_80 = _source_ok_T_78; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_5 = _source_ok_T_80; // @[Parameters.scala:1138:31]
wire [3:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[3:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_82 = _source_ok_T_81 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_86 = _source_ok_T_84; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_6 = _source_ok_T_86; // @[Parameters.scala:1138:31]
wire _source_ok_T_87 = io_in_d_bits_source_0 == 8'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_87; // @[Parameters.scala:1138:31]
wire _source_ok_T_88 = io_in_d_bits_source_0 == 8'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_88; // @[Parameters.scala:1138:31]
wire _source_ok_T_89 = io_in_d_bits_source_0 == 8'h42; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_89; // @[Parameters.scala:1138:31]
wire _source_ok_T_90 = io_in_d_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_10 = _source_ok_T_90; // @[Parameters.scala:1138:31]
wire _source_ok_T_91 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_92 = _source_ok_T_91 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_93 = _source_ok_T_92 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_94 = _source_ok_T_93 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_99 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1266 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1266; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1266; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [7:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_1334 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1334; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1334; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1334; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [7:0] source_1; // @[Monitor.scala:541:22]
reg [128:0] inflight; // @[Monitor.scala:614:27]
reg [515:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [515:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [128:0] a_set; // @[Monitor.scala:626:34]
wire [128:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [515:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [515:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [10:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [10:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [10:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [10:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [10:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [10:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [10:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [10:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [10:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [515:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [515:0] _a_opcode_lookup_T_6 = {512'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [515:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [515:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [515:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [515:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[515:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [255:0] _GEN_2 = 256'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [255:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1199 = _T_1266 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1199 ? _a_set_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1199 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1199 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [10:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [10:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [10:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [2050:0] _a_opcodes_set_T_1 = {2047'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1199 ? _a_opcodes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [2050:0] _a_sizes_set_T_1 = {2047'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1199 ? _a_sizes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [128:0] d_clr; // @[Monitor.scala:664:34]
wire [128:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [515:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [515:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1245 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [255:0] _GEN_5 = 256'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1245 & ~d_release_ack ? _d_clr_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1214 = _T_1334 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1214 ? _d_clr_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_5 = 2063'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1214 ? _d_opcodes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [2062:0] _d_sizes_clr_T_5 = 2063'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1214 ? _d_sizes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [128:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [128:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [128:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [515:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [515:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [515:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [515:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [515:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [515:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [128:0] inflight_1; // @[Monitor.scala:726:35]
wire [128:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [515:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [515:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [515:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [515:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [515:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [515:0] _c_opcode_lookup_T_6 = {512'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [515:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [515:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [515:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [515:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[515:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [128:0] d_clr_1; // @[Monitor.scala:774:34]
wire [128:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [515:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [515:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1310 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1310 & d_release_ack_1 ? _d_clr_wo_ready_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1292 = _T_1334 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1292 ? _d_clr_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_11 = 2063'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1292 ? _d_opcodes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [2062:0] _d_sizes_clr_T_11 = 2063'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1292 ? _d_sizes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 8'h0; // @[Monitor.scala:36:7, :795:113]
wire [128:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [128:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [515:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [515:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [515:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [515:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister, but allows the user to suggest a
// name for the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
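// Illustrative usage sketch (added for clarity; not part of the original
// library): a hypothetical module that delays a Bool by three cycles,
// resets every stage to false.B, and names the pipeline registers
// "debounce_0" .. "debounce_2". Only the chisel3._ import above is assumed.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  io.out := ShiftRegInit(io.in, n = 3, init = false.B, name = Some("debounce"))
}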
/** These wrap behavioral
 * shift registers into specific modules to allow
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
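// Usage sketch added for illustration (an assumption, not part of the original
// file): wrap a hypothetical one-bit signal in a depth-2 asynchronously reset
// pipeline so a backend flow can recognize the generated module by name.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetShiftReg(io.d, depth = 2, init = 0, name = Some("cdc_pipe"))
}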
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
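// Illustrative parameter choices (added for clarity, not part of the original
// file): a deep crossing that is also a voltage crossing might set
// narrow = true to move the read mux to the source side, accepting the
// combinational sink -> source -> sink path described above, while a depth-1
// queue can simply use the singleton helper.
object AsyncQueueParamsExamples {
  val deepNarrow = AsyncQueueParams(depth = 16, sync = 3, safe = true, narrow = true)
  val oneEntry   = AsyncQueueParams.singleton(sync = 3)
}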
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
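// Sketch added for clarity (not in the original source): the conversion above
// is the binary-reflected Gray code g = b ^ (b >> 1). Consecutive counter
// values, including the wrap-around, differ in exactly one bit, which is what
// makes the gray-coded index safe to synchronize into another clock domain.
// A quick software check of that property for a 4-bit counter:
object GrayCodeCheck {
  def toGray(b: Int): Int = b ^ (b >> 1)
  def main(args: Array[String]): Unit = {
    val codes = (0 until 16).map(toGray)
    val pairs = codes.zip(codes.tail :+ codes.head) // includes the 15 -> 0 wrap
    assert(pairs.forall { case (a, b) => Integer.bitCount(a ^ b) == 1 })
  }
}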
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because a dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that the
// dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_22( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_22 io_out_source_extend ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable, as the enqueued keyval is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
  // do not consider cases where we are inserting more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
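// --- Illustrative software model (not part of the original file) ---
// A plain-Scala sketch of the behaviour the stage chain above implements: ENQ inserts the
// new key in sorted order (every stage holding a key >= the new key shifts one place toward
// the tail), DEQ pops the smallest key from stage 0 and shifts everything toward the head.
// Names below are illustrative only.
object ShiftRegisterPqModel {
  def enq(q: Vector[Int], key: Int): Vector[Int] = {
    val idx = q.indexWhere(_ >= key) match { case -1 => q.length; case i => i } // insert_here position
    (q.take(idx) :+ key) ++ q.drop(idx)
  }
  def deq(q: Vector[Int]): (Int, Vector[Int]) = (q.head, q.tail)
  // enq(enq(enq(Vector(), 7), 3), 5) == Vector(3, 5, 7); deq of that yields (3, Vector(5, 7))
}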
| module PriorityQueueStage_147( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
  end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
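// --- Illustrative usage sketch (not part of the original file) ---
// ShiftRegInit as a plain two-cycle delay line with named, reset-to-zero taps.
// The module and signal names below are hypothetical.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  // Two RegNext stages, each initialised to 0, with suggested names delay_0 and delay_1.
  io.out := ShiftRegInit(io.in, n = 2, init = 0.U(8.W), name = Some("delay"))
}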
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
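// --- Illustrative usage sketch (not part of the original file) ---
// Typical use of AsyncResetSynchronizerShiftReg: bringing a single asynchronous bit
// (e.g. an external interrupt pin) into the local clock domain through a 3-deep,
// asynchronously reset synchronizer chain. Names below are hypothetical.
class AsyncBitSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, Some("int_sync"))
}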
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
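// --- Illustrative usage sketch (not part of the original file) ---
// ClockCrossingReg is a single enable-gated capture register for multi-bit data whose
// output is only consumed once a separately synchronized valid says it is stable (this is
// how the AsyncQueue sink uses it for deq.bits). A hypothetical capture stage:
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val data_in  = Input(UInt(32.W))
    val capture  = Input(Bool())
    val data_out = Output(UInt(32.W))
  })
  io.data_out := ClockCrossingReg(io.data_in, en = io.capture, doInit = false, name = Some("cdc_data_reg"))
}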
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_83( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_127 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
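// --- Illustrative worked example (not part of the original file) ---
// Plain-Scala evaluation of the unified-buffer partitioning above for a hypothetical
// channel with three virtual channels of bufferSize 2, 0 and 3 (VC 1 not traversable)
// and useOutputQueues = true:
object InputBufferLayoutExample extends App {
  val sizes       = Seq(2, 0, 3)
  val traversable = Seq(true, false, true)
  val delims = sizes.zip(traversable).map { case (s, t) => if (t) s else 0 }.scanLeft(0)(_ + _)
  val starts = delims.dropRight(1).zipWithIndex.map { case (s, i) => if (traversable(i)) s else 0 }
  val ends   = delims.tail.zipWithIndex.map { case (s, i) => if (traversable(i)) s else 0 }
  println(delims) // List(0, 2, 2, 5): fullSize = 5 memory slots in total
  println(starts) // List(0, 0, 2):    VC 0 circulates in [0, 2), VC 2 in [2, 5)
  println(ends)   // List(2, 0, 5)
}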
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
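// --- Illustrative worked example (not part of the original file) ---
// Plain-Scala model of the rotating-priority filter used for VC allocation above:
// Cat(vals, vals & ~mask) followed by PriorityEncoderOH, with the two halves folded back
// together, grants the lowest-numbered requester strictly above the last grant and wraps
// to the lowest requester when none remains. Assumes 4 virtual channels.
object VcAllocFilterExample extends App {
  val n = 4
  def grantOH(vals: Int, mask: Int): Int = {
    val doubled = (vals << n) | (vals & ~mask & ((1 << n) - 1)) // Cat(vals, vals & ~mask)
    val filter  = doubled & -doubled                            // PriorityEncoderOH: lowest set bit
    (filter & ((1 << n) - 1)) | (filter >> n)                   // vcalloc_sel
  }
  println(grantOH(0xB, 0x3).toBinaryString) // 1000 -> VC 3 wins (VCs 0 and 1 sit below the mask)
  println(grantOH(0xB, 0xF).toBinaryString) // 1    -> wraps around to VC 0
}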
| module mem_24x75( // @[InputUnit.scala:85:18]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [74:0] R0_data,
input [4:0] R1_addr,
input R1_en,
input R1_clk,
output [74:0] R1_data,
input [4:0] R2_addr,
input R2_en,
input R2_clk,
output [74:0] R2_data,
input [4:0] R3_addr,
input R3_en,
input R3_clk,
output [74:0] R3_data,
input [4:0] R4_addr,
input R4_en,
input R4_clk,
output [74:0] R4_data,
input [4:0] R5_addr,
input R5_en,
input R5_clk,
output [74:0] R5_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [74:0] W0_data
);
reg [74:0] Memory[0:23]; // @[InputUnit.scala:85:18]
always @(posedge W0_clk) begin // @[InputUnit.scala:85:18]
if (W0_en & 1'h1) // @[InputUnit.scala:85:18]
Memory[W0_addr] <= W0_data; // @[InputUnit.scala:85:18]
  end // always @(posedge)
  assign R0_data = Memory[R0_addr];
  assign R1_data = Memory[R1_addr];
  assign R2_data = Memory[R2_addr];
  assign R3_data = Memory[R3_addr];
  assign R4_data = Memory[R4_addr];
  assign R5_data = Memory[R5_addr];
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit object MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
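  // --- Illustrative worked example (not part of the original file) ---
  // A plain-Scala model of the rounding shift used above. The bit tests implement the
  // "round-to-nearest, ties-to-even" increment from the linked RISC-V vector fixed-point
  // rounding table: add 1 when the dropped half-bit is set and either a lower dropped bit
  // is set or the result would otherwise be odd.
  object RoundingShiftExample {
    def roundShift(x: Int, u: Int): Int = {
      val pointFive = if (u == 0) 0 else (x >> (u - 1)) & 1
      val zeros     = if (u <= 1) 0 else if ((x & ((1 << (u - 1)) - 1)) != 0) 1 else 0
      val onesDigit = (x >> u) & 1
      (x >> u) + (pointFive & (zeros | onesDigit))
    }
    // roundShift(5, 1) == 2  (2.5 ties to the even result 2)
    // roundShift(7, 1) == 4  (3.5 ties to the even result 4)
    // roundShift(6, 2) == 2  (1.5 ties to the even result 2)
  }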
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
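// --- Illustrative usage sketch (not part of the original file) ---
// A tiny generic multiply-accumulate cell written against the Arithmetic typeclass above;
// the same module elaborates for SInt, UInt, Float, or any user-supplied type that has an
// Arithmetic instance in scope. The class name is illustrative.
class MacCellExample[T <: Data](t: T)(implicit ev: Arithmetic[T]) extends Module {
  import ev._
  val io = IO(new Bundle {
    val a   = Input(t.cloneType)
    val b   = Input(t.cloneType)
    val acc = Input(t.cloneType)
    val out = Output(t.cloneType)
  })
  io.out := io.acc.mac(io.a, io.b) // mac returns a * b + acc
}
// e.g. Module(new MacCellExample(SInt(16.W))) or Module(new MacCellExample(Float(8, 24)))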
File AccumulatorMem.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
class AccumulatorReadReq[T <: Data: Arithmetic, U <: Data](n: Int, acc_t: T, scale_t: U) extends Bundle {
val addr = UInt(log2Ceil(n).W)
val scale = scale_t
val igelu_qb = acc_t.cloneType
val igelu_qc = acc_t.cloneType
val iexp_qln2 = acc_t.cloneType
val iexp_qln2_inv = acc_t.cloneType
val act = UInt(Activation.bitwidth.W) // TODO magic number
val full = Bool() // Whether or not we return the full bitwidth output
val fromDMA = Bool()
}
class AccumulatorReadResp[T <: Data: Arithmetic, U <: Data](fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
val data = fullDataType.cloneType
val fromDMA = Bool()
val scale = scale_t.cloneType
val igelu_qb = fullDataType.head.head.cloneType
val igelu_qc = fullDataType.head.head.cloneType
val iexp_qln2 = fullDataType.head.head.cloneType
val iexp_qln2_inv = fullDataType.head.head.cloneType
val act = UInt(Activation.bitwidth.W) // TODO magic number
val acc_bank_id = UInt(2.W) // TODO magic number
}
class AccumulatorReadIO[T <: Data: Arithmetic, U <: Data](n: Int, fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
val req = Decoupled(new AccumulatorReadReq[T, U](n, fullDataType.head.head.cloneType, scale_t))
val resp = Flipped(Decoupled(new AccumulatorReadResp[T, U](fullDataType, scale_t)))
}
class AccumulatorWriteReq[T <: Data: Arithmetic](n: Int, t: Vec[Vec[T]]) extends Bundle {
val addr = UInt(log2Up(n).W)
val data = t.cloneType
val acc = Bool()
val mask = Vec(t.getWidth / 8, Bool()) // TODO Use aligned_to here
}
class AccumulatorMemIO [T <: Data: Arithmetic, U <: Data](n: Int, t: Vec[Vec[T]], scale_t: U,
acc_sub_banks: Int, use_shared_ext_mem: Boolean
) extends Bundle {
val read = Flipped(new AccumulatorReadIO(n, t, scale_t))
val write = Flipped(Decoupled(new AccumulatorWriteReq(n, t)))
val ext_mem = if (use_shared_ext_mem) Some(Vec(acc_sub_banks, new ExtMemIO)) else None
val adder = new Bundle {
val valid = Output(Bool())
val op1 = Output(t.cloneType)
val op2 = Output(t.cloneType)
val sum = Input(t.cloneType)
}
}
class AccPipe[T <: Data : Arithmetic](latency: Int, t: T)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val op1 = Input(t.cloneType)
val op2 = Input(t.cloneType)
val sum = Output(t.cloneType)
})
import ev._
io.sum := ShiftRegister(io.op1 + io.op2, latency)
}
class AccPipeShared[T <: Data : Arithmetic](latency: Int, t: Vec[Vec[T]], banks: Int) extends Module {
val io = IO(new Bundle {
val in_sel = Input(Vec(banks, Bool()))
val ina = Input(Vec(banks, t.cloneType))
val inb = Input(Vec(banks, t.cloneType))
val out = Output(t.cloneType)
})
val ina = Mux1H(io.in_sel, io.ina)
val inb = Mux1H(io.in_sel, io.inb)
io.out := VecInit((ina zip inb).map { case (rv, wv) =>
VecInit((rv zip wv).map { case (re, we) =>
val m = Module(new AccPipe(latency, t.head.head.cloneType))
m.io.op1 := re
m.io.op2 := we
m.io.sum
})
})
}
class AccumulatorMem[T <: Data, U <: Data](
n: Int, t: Vec[Vec[T]], scale_func: (T, U) => T, scale_t: U,
acc_singleported: Boolean, acc_sub_banks: Int,
use_shared_ext_mem: Boolean,
acc_latency: Int, acc_type: T, is_dummy: Boolean
)
(implicit ev: Arithmetic[T]) extends Module {
// TODO Do writes in this module work with matrices of size 2? If we try to read from an address right after writing
// to it, then we might not get the written data. We might need some kind of cooldown counter after addresses in the
// accumulator have been written to for configurations with such small matrices
// TODO make a new aligned_to variable specifically for AccumulatorMem. We should assume that inputs are at least
// accType.getWidth/8 aligned, because it won't make sense to do matrix additions directly in the DMA otherwise.
import ev._
// TODO unify this with TwoPortSyncMemIO
val io = IO(new AccumulatorMemIO(n, t, scale_t, acc_sub_banks, use_shared_ext_mem))
require (acc_latency >= 2)
val pipelined_writes = Reg(Vec(acc_latency, Valid(new AccumulatorWriteReq(n, t))))
val oldest_pipelined_write = pipelined_writes(acc_latency-1)
pipelined_writes(0).valid := io.write.fire
pipelined_writes(0).bits := io.write.bits
for (i <- 1 until acc_latency) {
pipelined_writes(i) := pipelined_writes(i-1)
}
val rdata_for_adder = Wire(t)
rdata_for_adder := DontCare
val rdata_for_read_resp = Wire(t)
rdata_for_read_resp := DontCare
val adder_sum = io.adder.sum
io.adder.valid := pipelined_writes(0).valid && pipelined_writes(0).bits.acc
io.adder.op1 := rdata_for_adder
io.adder.op2 := pipelined_writes(0).bits.data
val block_read_req = WireInit(false.B)
val block_write_req = WireInit(false.B)
val mask_len = t.getWidth / 8
val mask_elem = UInt((t.getWidth / mask_len).W)
if (!acc_singleported && !is_dummy) {
require(!use_shared_ext_mem)
val mem = TwoPortSyncMem(n, t, mask_len) // TODO We assume byte-alignment here. Use aligned_to instead
mem.io.waddr := oldest_pipelined_write.bits.addr
mem.io.wen := oldest_pipelined_write.valid
mem.io.wdata := Mux(oldest_pipelined_write.bits.acc, adder_sum, oldest_pipelined_write.bits.data)
mem.io.mask := oldest_pipelined_write.bits.mask
rdata_for_adder := mem.io.rdata
rdata_for_read_resp := mem.io.rdata
mem.io.raddr := Mux(io.write.fire && io.write.bits.acc, io.write.bits.addr, io.read.req.bits.addr)
mem.io.ren := io.read.req.fire || (io.write.fire && io.write.bits.acc)
} else if (!is_dummy) {
val rmw_req = Wire(Decoupled(UInt()))
rmw_req.valid := io.write.valid && io.write.bits.acc
rmw_req.bits := io.write.bits.addr
rmw_req.ready := true.B
block_write_req := !rmw_req.ready
val only_read_req = Wire(Decoupled(UInt()))
only_read_req.valid := io.read.req.valid
only_read_req.bits := io.read.req.bits.addr
only_read_req.ready := true.B
block_read_req := !only_read_req.ready
for (i <- 0 until acc_sub_banks) {
def isThisBank(addr: UInt) = addr(log2Ceil(acc_sub_banks)-1,0) === i.U
def getBankIdx(addr: UInt) = addr >> log2Ceil(acc_sub_banks)
val (read, write) = if (use_shared_ext_mem) {
def read(addr: UInt, ren: Bool): Data = {
io.ext_mem.get(i).read_en := ren
io.ext_mem.get(i).read_addr := addr
io.ext_mem.get(i).read_data
}
io.ext_mem.get(i).write_en := false.B
io.ext_mem.get(i).write_addr := DontCare
io.ext_mem.get(i).write_data := DontCare
io.ext_mem.get(i).write_mask := DontCare
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = {
io.ext_mem.get(i).write_en := true.B
io.ext_mem.get(i).write_addr := addr
io.ext_mem.get(i).write_data := wdata.asUInt
io.ext_mem.get(i).write_mask := wmask.asUInt
}
(read _, write _)
} else {
val mem = SyncReadMem(n / acc_sub_banks, Vec(mask_len, mask_elem))
def read(addr: UInt, ren: Bool): Data = mem.read(addr, ren)
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = mem.write(addr, wdata, wmask)
(read _, write _)
}
val ren = WireInit(false.B)
val raddr = WireInit(getBankIdx(rmw_req.bits))
val nEntries = 3
// Writes coming 2 cycles after a read lead to bad bank behavior,
// so add another buffer stage here
class W_Q_Entry[T <: Data](mask_len: Int, mask_elem: T) extends Bundle {
val valid = Bool()
val data = Vec(mask_len, mask_elem)
val mask = Vec(mask_len, Bool())
val addr = UInt(log2Ceil(n/acc_sub_banks).W)
}
val w_q = Reg(Vec(nEntries, new W_Q_Entry(mask_len, mask_elem)))
for (e <- w_q) {
when (e.valid) {
assert(!(
io.write.fire && io.write.bits.acc &&
isThisBank(io.write.bits.addr) && getBankIdx(io.write.bits.addr) === e.addr &&
((io.write.bits.mask.asUInt & e.mask.asUInt) =/= 0.U)
), "you cannot accumulate to an AccumulatorMem address until previous writes to that address have completed")
when (io.write.bits.acc && isThisBank(io.write.bits.addr) && getBankIdx(io.write.bits.addr) === e.addr) {
rmw_req.ready := false.B
}
when (isThisBank(io.read.req.bits.addr) && getBankIdx(io.read.req.bits.addr) === e.addr) {
only_read_req.ready := false.B
}
}
}
val w_q_head = RegInit(1.U(nEntries.W))
val w_q_tail = RegInit(1.U(nEntries.W))
val w_q_full = (w_q_tail.asBools zip w_q.map(_.valid)).map({ case (h,v) => h && v }).reduce(_||_)
val w_q_empty = !(w_q_head.asBools zip w_q.map(_.valid)).map({ case (h,v) => h && v }).reduce(_||_)
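// w_q_head and w_q_tail are one-hot pointers into the nEntries write-queue slots:
// the queue is "full" when the tail points at a slot that is still valid, and
// "empty" when the slot under the head holds no valid entry.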
val wen = WireInit(false.B)
val wdata = Mux1H(w_q_head.asBools, w_q.map(_.data))
val wmask = Mux1H(w_q_head.asBools, w_q.map(_.mask))
val waddr = Mux1H(w_q_head.asBools, w_q.map(_.addr))
when (wen) {
w_q_head := (w_q_head << 1).asUInt | w_q_head(nEntries-1)
for (i <- 0 until nEntries) {
when (w_q_head(i)) {
w_q(i).valid := false.B
}
}
}
val w_q_push = oldest_pipelined_write.valid && isThisBank(oldest_pipelined_write.bits.addr)
when (w_q_push) {
assert(!w_q_full || wen, "we ran out of acc-sub-bank write q entries")
w_q_tail := (w_q_tail << 1).asUInt | w_q_tail(nEntries-1)
for (i <- 0 until nEntries) {
when (w_q_tail(i)) {
w_q(i).valid := true.B
w_q(i).data := Mux(oldest_pipelined_write.bits.acc, adder_sum, oldest_pipelined_write.bits.data).asTypeOf(Vec(mask_len, mask_elem))
w_q(i).mask := oldest_pipelined_write.bits.mask
w_q(i).addr := getBankIdx(oldest_pipelined_write.bits.addr)
}
}
}
val bank_rdata = read(raddr, ren && !wen).asTypeOf(t)
when (RegNext(ren && rmw_req.valid && isThisBank(rmw_req.bits))) {
rdata_for_adder := bank_rdata
} .elsewhen (RegNext(ren)) {
rdata_for_read_resp := bank_rdata
}
when (wen) {
write(waddr, wdata, wmask)
}
// Three requestors, 1 slot
// Priority is (in descending order):
// 1. incoming reads for RMW
// 2. writes from RMW
// 3. incoming reads
when (rmw_req.fire && isThisBank(rmw_req.bits)) {
ren := true.B
when (isThisBank(only_read_req.bits)) {
only_read_req.ready := false.B
}
} .elsewhen (!w_q_empty) {
wen := true.B
when (isThisBank(only_read_req.bits)) {
only_read_req.ready := false.B
}
} .otherwise {
ren := isThisBank(only_read_req.bits) && only_read_req.fire
raddr := getBankIdx(only_read_req.bits)
}
when (reset.asBool) {
w_q.foreach(_.valid := false.B)
}
}
}
val q = Module(new Queue(new AccumulatorReadResp(t, scale_t), 1, true, true))
q.io.enq.bits.data := rdata_for_read_resp
if (is_dummy) {
rdata_for_read_resp := DontCare
rdata_for_adder := DontCare
}
q.io.enq.bits.scale := RegNext(io.read.req.bits.scale)
q.io.enq.bits.igelu_qb := RegNext(io.read.req.bits.igelu_qb)
q.io.enq.bits.igelu_qc := RegNext(io.read.req.bits.igelu_qc)
q.io.enq.bits.iexp_qln2 := RegNext(io.read.req.bits.iexp_qln2)
q.io.enq.bits.iexp_qln2_inv := RegNext(io.read.req.bits.iexp_qln2_inv)
q.io.enq.bits.act := RegNext(io.read.req.bits.act)
q.io.enq.bits.fromDMA := RegNext(io.read.req.bits.fromDMA)
q.io.enq.bits.acc_bank_id := DontCare
q.io.enq.valid := RegNext(io.read.req.fire)
val p = q.io.deq
io.read.resp.bits.data := p.bits.data
io.read.resp.bits.fromDMA := p.bits.fromDMA
io.read.resp.bits.igelu_qb := p.bits.igelu_qb
io.read.resp.bits.igelu_qc := p.bits.igelu_qc
io.read.resp.bits.iexp_qln2 := p.bits.iexp_qln2
io.read.resp.bits.iexp_qln2_inv := p.bits.iexp_qln2_inv
io.read.resp.bits.act := p.bits.act
io.read.resp.bits.scale := p.bits.scale
io.read.resp.bits.acc_bank_id := DontCare // This is set in Scratchpad
io.read.resp.valid := p.valid
p.ready := io.read.resp.ready
val q_will_be_empty = (q.io.count +& q.io.enq.fire) - q.io.deq.fire === 0.U
io.read.req.ready := q_will_be_empty && (
// Make sure we aren't accumulating, which would take over both ports
!(io.write.valid && io.write.bits.acc) &&
!pipelined_writes.map(r => r.valid && r.bits.addr === io.read.req.bits.addr).reduce(_||_) &&
!block_read_req
)
io.write.ready := !block_write_req &&
!pipelined_writes.map(r => r.valid && r.bits.addr === io.write.bits.addr && io.write.bits.acc).reduce(_||_)
when (reset.asBool) {
pipelined_writes.foreach(_.valid := false.B)
}
// assert(!(io.read.req.valid && io.write.en && io.write.acc), "reading and accumulating simultaneously is not supported")
assert(!(io.read.req.fire && io.write.fire && io.read.req.bits.addr === io.write.bits.addr), "reading from and writing to same address is not supported")
}
| module AccPipe_14( // @[AccumulatorMem.scala:63:7]
input clock, // @[AccumulatorMem.scala:63:7]
input reset, // @[AccumulatorMem.scala:63:7]
input [31:0] io_op1, // @[AccumulatorMem.scala:64:14]
input [31:0] io_op2, // @[AccumulatorMem.scala:64:14]
output [31:0] io_sum // @[AccumulatorMem.scala:64:14]
);
wire [31:0] io_op1_0 = io_op1; // @[AccumulatorMem.scala:63:7]
wire [31:0] io_op2_0 = io_op2; // @[AccumulatorMem.scala:63:7]
wire [31:0] io_sum_0; // @[AccumulatorMem.scala:63:7]
wire [32:0] _io_sum_T = {io_op1_0[31], io_op1_0} + {io_op2_0[31], io_op2_0}; // @[Arithmetic.scala:94:38]
wire [31:0] _io_sum_T_1 = _io_sum_T[31:0]; // @[Arithmetic.scala:94:38]
wire [31:0] _io_sum_T_2 = _io_sum_T_1; // @[Arithmetic.scala:94:38]
reg [31:0] io_sum_r; // @[AccumulatorMem.scala:70:26]
assign io_sum_0 = io_sum_r; // @[AccumulatorMem.scala:63:7, :70:26]
always @(posedge clock) // @[AccumulatorMem.scala:63:7]
io_sum_r <= _io_sum_T_2; // @[Arithmetic.scala:94:38]
assign io_sum = io_sum_0; // @[AccumulatorMem.scala:63:7]
endmodule |
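The emitted AccPipe_14 above has a single pipeline register on the sum path, which is consistent with an instance elaborated with latency = 1. Below is a minimal, self-contained sketch of the same ShiftRegister pipelining pattern (illustrative only: PipelinedAdderSketch is an assumption, not part of gemmini, and it replaces the Arithmetic type class with a plain SInt add so it elaborates on its own):
import chisel3._
import chisel3.util._
// Illustrative stand-in for AccPipe: a plain SInt adder followed by `latency`
// register stages, mirroring the io.sum assignment above.
class PipelinedAdderSketch(latency: Int, w: Int) extends Module {
  val io = IO(new Bundle {
    val op1 = Input(SInt(w.W))
    val op2 = Input(SInt(w.W))
    val sum = Output(SInt(w.W))
  })
  // ShiftRegister inserts `latency` back-to-back registers on the sum path;
  // with latency = 1 this is the single clocked register visible in AccPipe_14.
  io.sum := ShiftRegister(io.op1 + io.op2, latency)
}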
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
| module NoCMonitor_129( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
reg in_flight_5; // @[Monitor.scala:16:26]
reg in_flight_6; // @[Monitor.scala:16:26]
reg in_flight_7; // @[Monitor.scala:16:26]
reg in_flight_8; // @[Monitor.scala:16:26]
reg in_flight_9; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 4'h0; // @[Monitor.scala:21:46]
wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 4'h1; // @[Monitor.scala:21:46] |
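The NoCMonitor above keeps one in-flight bit per virtual channel, set by a head flit and cleared by the matching tail flit. A minimal standalone sketch of that tracking pattern (illustrative only: InFlightTrackerSketch and its port names are assumptions, not part of constellation) is:
import chisel3._
import chisel3.util._
class InFlightTrackerSketch(nVirtualChannels: Int) extends Module {
  val io = IO(new Bundle {
    val valid = Input(Bool())
    val head  = Input(Bool())
    val tail  = Input(Bool())
    val vcId  = Input(UInt(log2Ceil(nVirtualChannels).W))
  })
  // One in-flight bit per virtual channel; seeing a second head flit while a
  // packet is already in flight on that channel is a protocol error.
  val in_flight = RegInit(VecInit(Seq.fill(nVirtualChannels)(false.B)))
  when (io.valid) {
    when (io.head) {
      assert(!in_flight(io.vcId), "Flit head/tail sequencing is broken")
      in_flight(io.vcId) := true.B
    }
    when (io.tail) {
      in_flight(io.vcId) := false.B
    }
  }
}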
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
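// Example (illustrative): countLeadingZeros("b0010".U(4.W)) = 2.U, since the
// PriorityEncoder scans the reversed (MSB-first) bit sequence for the first 1.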
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
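// Example (illustrative): orReduceBy2("b01001100".U(8.W)) = "b1010".U, each
// output bit being the OR of one adjacent pair of input bits.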
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_64( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20]
wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53]
wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38]
wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29]
wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42]
wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49]
wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50]
wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31]
wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40]
wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37]
wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21]
wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25]
wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26]
wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26]
wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26]
wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56]
wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22]
wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // @[primitives.scala:77:20, :78:22]
wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20]
wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22]
wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20]
wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20]
wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20]
wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20]
wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20]
wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20]
wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20]
wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_60 = ~_roundMask_T_59; // @[primitives.scala:73:32, :77:20]
wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}]
wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}]
wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17]
wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56]
wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22]
wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22]
wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20]
wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20]
wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22]
wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20]
wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20]
wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58]
wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24]
wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24]
wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}]
wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41]
wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}]
wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28]
wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}]
wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67]
wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49]
wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31]
wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35]
wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? _roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32]
wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30]
assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}]
assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50]
assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31]
assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61]
wire unboundedRange_roundPosBit = doShiftSigDown1 ? _unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}]
wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67]
wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27]
wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49]
wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}]
wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}]
wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57]
wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49]
wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71]
wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}]
wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30]
wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49]
wire _common_underflow_T_10 = doShiftSigDown1 ? _common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49]
wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}]
wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34]
wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38]
wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45]
wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}]
wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60]
wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27]
assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76]
assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40]
assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34]
wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22]
wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36]
wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}]
wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32]
wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45]
wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}]
wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20]
wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}]
wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22]
wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18]
wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}]
wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14]
wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16]
wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16]
wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16]
wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16]
wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22]
wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}]
wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16]
wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23]
wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}]
wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
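Several of the magic constants in the emitted Verilog above fall directly out of the special-exponent formulas near the top of RoundAnyRawFNToRecFN. A small plain-Scala sketch (illustrative only: RecFNConstantsSketch is an assumption, not part of hardfloat) evaluating them for outExpWidth = 8 and outSigWidth = 24 reproduces the literals 0x1C0 (NaN exponent), 0x180 (infinity exponent) and 0x6B (minimum nonzero exponent, seen as 11'sh6B in the total-underflow comparison):
// Plain Scala, no Chisel needed: mirror the constant definitions from
// RoundAnyRawFNToRecFN for a single-precision output format.
object RecFNConstantsSketch extends App {
  val outExpWidth = 8
  val outSigWidth = 24
  val outNaNExp        = BigInt(7) << (outExpWidth - 2)       // 448 = 0x1C0
  val outInfExp        = BigInt(6) << (outExpWidth - 2)       // 384 = 0x180
  val outMaxFiniteExp  = outInfExp - 1                        // 383 = 0x17F
  val outMinNormExp    = (BigInt(1) << (outExpWidth - 1)) + 2 // 130 = 0x82
  val outMinNonzeroExp = outMinNormExp - outSigWidth + 1      // 107 = 0x6B
  Seq(outNaNExp, outInfExp, outMaxFiniteExp, outMinNormExp, outMinNonzeroExp)
    .foreach(v => println("0x" + v.toString(16).toUpperCase))
}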
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
* Object to XOR fold a input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
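// Worked example (illustrative): Fold("b10110110".U, 3, 8) XORs the 3-bit
// slices 0b110 and 0b110 with the leftover top bits 0b10, giving 0b010.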
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
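// Example (illustrative): maskMatch("b0110".U, "b0100".U) is true.B because the
// masks share bit 2; maskMatch("b0110".U, "b1001".U) is false.B.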
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
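// Illustrative examples (not part of the original source), assuming amt <= n:
//   WrapAdd(6.U, 3.U, n = 8) === 1.U   (power-of-two case: simple truncation)
//   WrapAdd(5.U, 4.U, n = 6) === 3.U   (9 exceeds n, so n is subtracted once)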
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
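// Illustrative examples (not part of the original source):
//   WrapSub(1.U, 3, n = 8) === 6.U   (power-of-two case: simple truncation)
//   WrapSub(1.U, 3, n = 6) === 4.U   (value < amt, so the result wraps: 6 - 3 + 1)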
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
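// Illustrative examples (not part of the original source):
//   WrapInc(5.U, n = 6) === 0.U and WrapDec(0.U, n = 6) === 5.U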
/**
 * Object to mask off the lower bits of a PC to align it to a "b"-byte
 * boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Double-invert so the mask is applied at the full width of pc; a plain AND
    // with ~(b-1).U would clear all pc bits above the width of the (b-1) literal.
~(~pc | (b-1).U)
}
}
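// Illustrative example (not part of the original source): AlignPCToBoundary(pc, 64)
// zeroes pc(5,0) and leaves every higher bit of pc intact, regardless of how much
// wider pc is than the (b-1) literal.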
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
 * Object to return the position of the lowest set bit at or after the head pointer, wrapping around to the start if none is set past it.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
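// Illustrative example (not part of the original source): with
// in = Seq(true.B, false.B, true.B) and head = 1.U the result is 2.U (the lowest
// valid index at or after head); with in = Seq(true.B, false.B, false.B) and
// head = 1.U the search wraps around and returns 0.U.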
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
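// Illustrative example (not part of the original source): with head = 3.U in an
// 8-entry queue, IsOlder(5.U, 1.U, 3.U) is true -- entry 5 was allocated before
// entry 1, which was only allocated after the pointer wrapped.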
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
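// Illustrative example (not part of the original source):
//   MaskLower("b01010".U) === "b01111".U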
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
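// Illustrative example (not part of the original source):
//   MaskUpper("b01010".U) === "b11110".U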
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
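// Illustrative example (not part of the original source): SelectFirstN("b01101".U, 2)
// gives sels(0) === "b00001".U and sels(1) === "b00100".U -- the two lowest set
// bits, each returned one-hot.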
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
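// Illustrative instantiation (a minimal sketch; the value name is hypothetical):
//   val compactor = Module(new Compactor(n = 4, k = 2, gen = UInt(8.W)))
// The first two valid entries of io.in are routed, in order, onto the two io.out
// ports; valid inputs beyond the first k are left not-ready until an output frees up.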
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
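// Illustrative instantiation (a minimal sketch; the bundle and predicate are
// hypothetical, and gen must mix in HasBoomUOP):
//   val q = Module(new BranchKillableQueue(new ExampleUopBundle, entries = 8,
//                                          flush_fn = u => u.uses_ldq))
// Entries whose br_mask matches a mispredicted branch, or for which flush_fn
// returns true while io.flush is high, are dropped instead of being dequeued.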
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
module BranchKillableQueue_5( // @[util.scala:458:7]
input clock, // @[util.scala:458:7]
input reset, // @[util.scala:458:7]
output io_enq_ready, // @[util.scala:463:14]
input io_enq_valid, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_inst, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:463:14]
input io_enq_bits_uop_is_rvc, // @[util.scala:463:14]
input [39:0] io_enq_bits_uop_debug_pc, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_0, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_1, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_2, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_0, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_1, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_2, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_4, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_5, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_6, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_7, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_8, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_9, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_dis_col_sel, // @[util.scala:463:14]
input [11:0] io_enq_bits_uop_br_mask, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_tag, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_type, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfb, // @[util.scala:463:14]
input io_enq_bits_uop_is_fence, // @[util.scala:463:14]
input io_enq_bits_uop_is_fencei, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfence, // @[util.scala:463:14]
input io_enq_bits_uop_is_amo, // @[util.scala:463:14]
input io_enq_bits_uop_is_eret, // @[util.scala:463:14]
input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
input io_enq_bits_uop_is_rocc, // @[util.scala:463:14]
input io_enq_bits_uop_is_mov, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_ftq_idx, // @[util.scala:463:14]
input io_enq_bits_uop_edge_inst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:463:14]
input io_enq_bits_uop_taken, // @[util.scala:463:14]
input io_enq_bits_uop_imm_rename, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_imm_sel, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_pimm, // @[util.scala:463:14]
input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_op1_sel, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_op2_sel, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_rob_idx, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ldq_idx, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_stq_idx, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_pdst, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_prs1, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_prs2, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_prs3, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_ppred, // @[util.scala:463:14]
input io_enq_bits_uop_prs1_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs2_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs3_busy, // @[util.scala:463:14]
input io_enq_bits_uop_ppred_busy, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_stale_pdst, // @[util.scala:463:14]
input io_enq_bits_uop_exception, // @[util.scala:463:14]
input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:463:14]
input io_enq_bits_uop_mem_signed, // @[util.scala:463:14]
input io_enq_bits_uop_uses_ldq, // @[util.scala:463:14]
input io_enq_bits_uop_uses_stq, // @[util.scala:463:14]
input io_enq_bits_uop_is_unique, // @[util.scala:463:14]
input io_enq_bits_uop_flush_on_commit, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_csr_cmd, // @[util.scala:463:14]
input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_ldst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
input io_enq_bits_uop_frs3_en, // @[util.scala:463:14]
input io_enq_bits_uop_fcn_dw, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_fcn_op, // @[util.scala:463:14]
input io_enq_bits_uop_fp_val, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_fp_rm, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_typ, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_debug_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:463:14]
input [64:0] io_enq_bits_data, // @[util.scala:463:14]
input io_enq_bits_fflags_valid, // @[util.scala:463:14]
input [4:0] io_enq_bits_fflags_bits, // @[util.scala:463:14]
input io_deq_ready, // @[util.scala:463:14]
output io_deq_valid, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_inst, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:463:14]
output io_deq_bits_uop_is_rvc, // @[util.scala:463:14]
output [39:0] io_deq_bits_uop_debug_pc, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_0, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_1, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_2, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_0, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_1, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_2, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_4, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_5, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_6, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_7, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_8, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_9, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_dis_col_sel, // @[util.scala:463:14]
output [11:0] io_deq_bits_uop_br_mask, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_tag, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_type, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfb, // @[util.scala:463:14]
output io_deq_bits_uop_is_fence, // @[util.scala:463:14]
output io_deq_bits_uop_is_fencei, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfence, // @[util.scala:463:14]
output io_deq_bits_uop_is_amo, // @[util.scala:463:14]
output io_deq_bits_uop_is_eret, // @[util.scala:463:14]
output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
output io_deq_bits_uop_is_rocc, // @[util.scala:463:14]
output io_deq_bits_uop_is_mov, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_ftq_idx, // @[util.scala:463:14]
output io_deq_bits_uop_edge_inst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:463:14]
output io_deq_bits_uop_taken, // @[util.scala:463:14]
output io_deq_bits_uop_imm_rename, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_imm_sel, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_pimm, // @[util.scala:463:14]
output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_op1_sel, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_op2_sel, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_rob_idx, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ldq_idx, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_stq_idx, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_pdst, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_prs1, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_prs2, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_prs3, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_ppred, // @[util.scala:463:14]
output io_deq_bits_uop_prs1_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs2_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs3_busy, // @[util.scala:463:14]
output io_deq_bits_uop_ppred_busy, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_stale_pdst, // @[util.scala:463:14]
output io_deq_bits_uop_exception, // @[util.scala:463:14]
output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:463:14]
output io_deq_bits_uop_mem_signed, // @[util.scala:463:14]
output io_deq_bits_uop_uses_ldq, // @[util.scala:463:14]
output io_deq_bits_uop_uses_stq, // @[util.scala:463:14]
output io_deq_bits_uop_is_unique, // @[util.scala:463:14]
output io_deq_bits_uop_flush_on_commit, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_csr_cmd, // @[util.scala:463:14]
output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_ldst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
output io_deq_bits_uop_frs3_en, // @[util.scala:463:14]
output io_deq_bits_uop_fcn_dw, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_fcn_op, // @[util.scala:463:14]
output io_deq_bits_uop_fp_val, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_fp_rm, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_typ, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_debug_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:463:14]
output [64:0] io_deq_bits_data, // @[util.scala:463:14]
output io_deq_bits_predicated, // @[util.scala:463:14]
output io_deq_bits_fflags_valid, // @[util.scala:463:14]
output [4:0] io_deq_bits_fflags_bits, // @[util.scala:463:14]
input [11:0] io_brupdate_b1_resolve_mask, // @[util.scala:463:14]
input [11:0] io_brupdate_b1_mispredict_mask, // @[util.scala:463:14]
input [31:0] io_brupdate_b2_uop_inst, // @[util.scala:463:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_rvc, // @[util.scala:463:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[util.scala:463:14]
input io_brupdate_b2_uop_iq_type_0, // @[util.scala:463:14]
input io_brupdate_b2_uop_iq_type_1, // @[util.scala:463:14]
input io_brupdate_b2_uop_iq_type_2, // @[util.scala:463:14]
input io_brupdate_b2_uop_iq_type_3, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_0, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_1, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_2, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_3, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_4, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_5, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_6, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_7, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_8, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_9, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_issued, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_issued_partial_agen, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_iw_p1_speculative_child, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_iw_p2_speculative_child, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_dis_col_sel, // @[util.scala:463:14]
input [11:0] io_brupdate_b2_uop_br_mask, // @[util.scala:463:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[util.scala:463:14]
input [3:0] io_brupdate_b2_uop_br_type, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_sfb, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_fence, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_fencei, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_sfence, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_amo, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_eret, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_rocc, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_mov, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[util.scala:463:14]
input io_brupdate_b2_uop_edge_inst, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[util.scala:463:14]
input io_brupdate_b2_uop_taken, // @[util.scala:463:14]
input io_brupdate_b2_uop_imm_rename, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_imm_sel, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_pimm, // @[util.scala:463:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_op1_sel, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_op2_sel, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_ldst, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_wen, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_ren1, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_ren2, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_ren3, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_swap12, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_swap23, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_fromint, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_toint, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_fma, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_div, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_wflags, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_vec, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_rob_idx, // @[util.scala:463:14]
input [3:0] io_brupdate_b2_uop_ldq_idx, // @[util.scala:463:14]
input [3:0] io_brupdate_b2_uop_stq_idx, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[util.scala:463:14]
input io_brupdate_b2_uop_prs1_busy, // @[util.scala:463:14]
input io_brupdate_b2_uop_prs2_busy, // @[util.scala:463:14]
input io_brupdate_b2_uop_prs3_busy, // @[util.scala:463:14]
input io_brupdate_b2_uop_ppred_busy, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[util.scala:463:14]
input io_brupdate_b2_uop_exception, // @[util.scala:463:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[util.scala:463:14]
input io_brupdate_b2_uop_mem_signed, // @[util.scala:463:14]
input io_brupdate_b2_uop_uses_ldq, // @[util.scala:463:14]
input io_brupdate_b2_uop_uses_stq, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_unique, // @[util.scala:463:14]
input io_brupdate_b2_uop_flush_on_commit, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_csr_cmd, // @[util.scala:463:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[util.scala:463:14]
input io_brupdate_b2_uop_frs3_en, // @[util.scala:463:14]
input io_brupdate_b2_uop_fcn_dw, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_fcn_op, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_val, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_fp_rm, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_fp_typ, // @[util.scala:463:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[util.scala:463:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[util.scala:463:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[util.scala:463:14]
input io_brupdate_b2_uop_bp_debug_if, // @[util.scala:463:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_debug_fsrc, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_debug_tsrc, // @[util.scala:463:14]
input io_brupdate_b2_mispredict, // @[util.scala:463:14]
input io_brupdate_b2_taken, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_cfi_type, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_pc_sel, // @[util.scala:463:14]
input [39:0] io_brupdate_b2_jalr_target, // @[util.scala:463:14]
input [20:0] io_brupdate_b2_target_offset, // @[util.scala:463:14]
input io_flush, // @[util.scala:463:14]
output [2:0] io_count // @[util.scala:463:14]
);
wire [71:0] _ram_ext_R0_data; // @[util.scala:503:22]
wire io_enq_valid_0 = io_enq_valid; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:458:7]
wire [39:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_0_0 = io_enq_bits_uop_iq_type_0; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_1_0 = io_enq_bits_uop_iq_type_1; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_2_0 = io_enq_bits_uop_iq_type_2; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_3_0 = io_enq_bits_uop_iq_type_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_0_0 = io_enq_bits_uop_fu_code_0; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_1_0 = io_enq_bits_uop_fu_code_1; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_2_0 = io_enq_bits_uop_fu_code_2; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_3_0 = io_enq_bits_uop_fu_code_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_4_0 = io_enq_bits_uop_fu_code_4; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_5_0 = io_enq_bits_uop_fu_code_5; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_6_0 = io_enq_bits_uop_fu_code_6; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_7_0 = io_enq_bits_uop_fu_code_7; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_8_0 = io_enq_bits_uop_fu_code_8; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_9_0 = io_enq_bits_uop_fu_code_9; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_0 = io_enq_bits_uop_iw_issued; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_agen_0 = io_enq_bits_uop_iw_issued_partial_agen; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_dgen_0 = io_enq_bits_uop_iw_issued_partial_dgen; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_iw_p1_speculative_child_0 = io_enq_bits_uop_iw_p1_speculative_child; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_iw_p2_speculative_child_0 = io_enq_bits_uop_iw_p2_speculative_child; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p1_bypass_hint_0 = io_enq_bits_uop_iw_p1_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p2_bypass_hint_0 = io_enq_bits_uop_iw_p2_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p3_bypass_hint_0 = io_enq_bits_uop_iw_p3_bypass_hint; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_dis_col_sel_0 = io_enq_bits_uop_dis_col_sel; // @[util.scala:458:7]
wire [11:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_type_0 = io_enq_bits_uop_br_type; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfb_0 = io_enq_bits_uop_is_sfb; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fence_0 = io_enq_bits_uop_is_fence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfence_0 = io_enq_bits_uop_is_sfence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:458:7]
wire io_enq_bits_uop_is_eret_0 = io_enq_bits_uop_is_eret; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rocc_0 = io_enq_bits_uop_is_rocc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_mov_0 = io_enq_bits_uop_is_mov; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:458:7]
wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:458:7]
wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:458:7]
wire io_enq_bits_uop_imm_rename_0 = io_enq_bits_uop_imm_rename; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_imm_sel_0 = io_enq_bits_uop_imm_sel; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_pimm_0 = io_enq_bits_uop_pimm; // @[util.scala:458:7]
wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_op1_sel_0 = io_enq_bits_uop_op1_sel; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_op2_sel_0 = io_enq_bits_uop_op2_sel; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ldst_0 = io_enq_bits_uop_fp_ctrl_ldst; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wen_0 = io_enq_bits_uop_fp_ctrl_wen; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren1_0 = io_enq_bits_uop_fp_ctrl_ren1; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren2_0 = io_enq_bits_uop_fp_ctrl_ren2; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren3_0 = io_enq_bits_uop_fp_ctrl_ren3; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap12_0 = io_enq_bits_uop_fp_ctrl_swap12; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap23_0 = io_enq_bits_uop_fp_ctrl_swap23; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn_0 = io_enq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut_0 = io_enq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fromint_0 = io_enq_bits_uop_fp_ctrl_fromint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_toint_0 = io_enq_bits_uop_fp_ctrl_toint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fastpipe_0 = io_enq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fma_0 = io_enq_bits_uop_fp_ctrl_fma; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_div_0 = io_enq_bits_uop_fp_ctrl_div; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_sqrt_0 = io_enq_bits_uop_fp_ctrl_sqrt; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wflags_0 = io_enq_bits_uop_fp_ctrl_wflags; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_vec_0 = io_enq_bits_uop_fp_ctrl_vec; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:458:7]
wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:458:7]
wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:458:7]
wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:458:7]
wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:458:7]
wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:458:7]
wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_csr_cmd_0 = io_enq_bits_uop_csr_cmd; // @[util.scala:458:7]
wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:458:7]
wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:458:7]
wire io_enq_bits_uop_fcn_dw_0 = io_enq_bits_uop_fcn_dw; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_fcn_op_0 = io_enq_bits_uop_fcn_op; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_fp_rm_0 = io_enq_bits_uop_fp_rm; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_typ_0 = io_enq_bits_uop_fp_typ; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:458:7]
wire [64:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:458:7]
wire io_enq_bits_fflags_valid_0 = io_enq_bits_fflags_valid; // @[util.scala:458:7]
wire [4:0] io_enq_bits_fflags_bits_0 = io_enq_bits_fflags_bits; // @[util.scala:458:7]
wire io_deq_ready_0 = io_deq_ready; // @[util.scala:458:7]
wire [11:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[util.scala:458:7]
wire [11:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[util.scala:458:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[util.scala:458:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[util.scala:458:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_0_0 = io_brupdate_b2_uop_iq_type_0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_1_0 = io_brupdate_b2_uop_iq_type_1; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_2_0 = io_brupdate_b2_uop_iq_type_2; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_3_0 = io_brupdate_b2_uop_iq_type_3; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_0_0 = io_brupdate_b2_uop_fu_code_0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_1_0 = io_brupdate_b2_uop_fu_code_1; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_2_0 = io_brupdate_b2_uop_fu_code_2; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_3_0 = io_brupdate_b2_uop_fu_code_3; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_4_0 = io_brupdate_b2_uop_fu_code_4; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_5_0 = io_brupdate_b2_uop_fu_code_5; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_6_0 = io_brupdate_b2_uop_fu_code_6; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_7_0 = io_brupdate_b2_uop_fu_code_7; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_8_0 = io_brupdate_b2_uop_fu_code_8; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_9_0 = io_brupdate_b2_uop_fu_code_9; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_0 = io_brupdate_b2_uop_iw_issued; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen_0 = io_brupdate_b2_uop_iw_issued_partial_agen; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen_0 = io_brupdate_b2_uop_iw_issued_partial_dgen; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_iw_p1_speculative_child_0 = io_brupdate_b2_uop_iw_p1_speculative_child; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_iw_p2_speculative_child_0 = io_brupdate_b2_uop_iw_p2_speculative_child; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint_0 = io_brupdate_b2_uop_iw_p1_bypass_hint; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint_0 = io_brupdate_b2_uop_iw_p2_bypass_hint; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint_0 = io_brupdate_b2_uop_iw_p3_bypass_hint; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_dis_col_sel_0 = io_brupdate_b2_uop_dis_col_sel; // @[util.scala:458:7]
wire [11:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_type_0 = io_brupdate_b2_uop_br_type; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfence_0 = io_brupdate_b2_uop_is_sfence; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_eret_0 = io_brupdate_b2_uop_is_eret; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_rocc_0 = io_brupdate_b2_uop_is_rocc; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_mov_0 = io_brupdate_b2_uop_is_mov; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[util.scala:458:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[util.scala:458:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[util.scala:458:7]
wire io_brupdate_b2_uop_imm_rename_0 = io_brupdate_b2_uop_imm_rename; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_imm_sel_0 = io_brupdate_b2_uop_imm_sel; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_pimm_0 = io_brupdate_b2_uop_pimm; // @[util.scala:458:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_op1_sel_0 = io_brupdate_b2_uop_op1_sel; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_op2_sel_0 = io_brupdate_b2_uop_op2_sel; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst_0 = io_brupdate_b2_uop_fp_ctrl_ldst; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wen_0 = io_brupdate_b2_uop_fp_ctrl_wen; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1_0 = io_brupdate_b2_uop_fp_ctrl_ren1; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2_0 = io_brupdate_b2_uop_fp_ctrl_ren2; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3_0 = io_brupdate_b2_uop_fp_ctrl_ren3; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12_0 = io_brupdate_b2_uop_fp_ctrl_swap12; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23_0 = io_brupdate_b2_uop_fp_ctrl_swap23; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn_0 = io_brupdate_b2_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut_0 = io_brupdate_b2_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint_0 = io_brupdate_b2_uop_fp_ctrl_fromint; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_toint_0 = io_brupdate_b2_uop_fp_ctrl_toint; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe_0 = io_brupdate_b2_uop_fp_ctrl_fastpipe; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fma_0 = io_brupdate_b2_uop_fp_ctrl_fma; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_div_0 = io_brupdate_b2_uop_fp_ctrl_div; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt_0 = io_brupdate_b2_uop_fp_ctrl_sqrt; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags_0 = io_brupdate_b2_uop_fp_ctrl_wflags; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_vec_0 = io_brupdate_b2_uop_fp_ctrl_vec; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[util.scala:458:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[util.scala:458:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[util.scala:458:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[util.scala:458:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd_0 = io_brupdate_b2_uop_csr_cmd; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[util.scala:458:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fcn_dw_0 = io_brupdate_b2_uop_fcn_dw; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_fcn_op_0 = io_brupdate_b2_uop_fcn_op; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_fp_rm_0 = io_brupdate_b2_uop_fp_rm; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_fp_typ_0 = io_brupdate_b2_uop_fp_typ; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[util.scala:458:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[util.scala:458:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[util.scala:458:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[util.scala:458:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[util.scala:458:7]
wire io_flush_0 = io_flush; // @[util.scala:458:7]
wire io_enq_bits_predicated = 1'h0; // @[util.scala:458:7]
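  // Constant-zero wires emitted for the reset value of the per-entry valid flags (valids_* below);
  // the corresponding reset logic appears later in the module.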
wire _valids_WIRE_0 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_1 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_2 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_3 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_4 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_5 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_6 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_7 = 1'h0; // @[util.scala:504:34]
wire _io_enq_ready_T; // @[util.scala:543:21]
wire _io_deq_valid_T_1; // @[util.scala:548:42]
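  // out_* is the dequeue-side view of one queue entry: the uop fields are selected from the uops_*
  // registers by the dequeue pointer (the selection mux appears later in the module), while the
  // data/predicated/fflags payload comes from the ram read port and is unpacked below.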
wire [31:0] out_uop_inst; // @[util.scala:545:19]
wire [31:0] out_uop_debug_inst; // @[util.scala:545:19]
wire out_uop_is_rvc; // @[util.scala:545:19]
wire [39:0] out_uop_debug_pc; // @[util.scala:545:19]
wire out_uop_iq_type_0; // @[util.scala:545:19]
wire out_uop_iq_type_1; // @[util.scala:545:19]
wire out_uop_iq_type_2; // @[util.scala:545:19]
wire out_uop_iq_type_3; // @[util.scala:545:19]
wire out_uop_fu_code_0; // @[util.scala:545:19]
wire out_uop_fu_code_1; // @[util.scala:545:19]
wire out_uop_fu_code_2; // @[util.scala:545:19]
wire out_uop_fu_code_3; // @[util.scala:545:19]
wire out_uop_fu_code_4; // @[util.scala:545:19]
wire out_uop_fu_code_5; // @[util.scala:545:19]
wire out_uop_fu_code_6; // @[util.scala:545:19]
wire out_uop_fu_code_7; // @[util.scala:545:19]
wire out_uop_fu_code_8; // @[util.scala:545:19]
wire out_uop_fu_code_9; // @[util.scala:545:19]
wire out_uop_iw_issued; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_agen; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_dgen; // @[util.scala:545:19]
wire [1:0] out_uop_iw_p1_speculative_child; // @[util.scala:545:19]
wire [1:0] out_uop_iw_p2_speculative_child; // @[util.scala:545:19]
wire out_uop_iw_p1_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p2_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p3_bypass_hint; // @[util.scala:545:19]
wire [1:0] out_uop_dis_col_sel; // @[util.scala:545:19]
wire [11:0] out_uop_br_mask; // @[util.scala:545:19]
wire [3:0] out_uop_br_tag; // @[util.scala:545:19]
wire [3:0] out_uop_br_type; // @[util.scala:545:19]
wire out_uop_is_sfb; // @[util.scala:545:19]
wire out_uop_is_fence; // @[util.scala:545:19]
wire out_uop_is_fencei; // @[util.scala:545:19]
wire out_uop_is_sfence; // @[util.scala:545:19]
wire out_uop_is_amo; // @[util.scala:545:19]
wire out_uop_is_eret; // @[util.scala:545:19]
wire out_uop_is_sys_pc2epc; // @[util.scala:545:19]
wire out_uop_is_rocc; // @[util.scala:545:19]
wire out_uop_is_mov; // @[util.scala:545:19]
wire [4:0] out_uop_ftq_idx; // @[util.scala:545:19]
wire out_uop_edge_inst; // @[util.scala:545:19]
wire [5:0] out_uop_pc_lob; // @[util.scala:545:19]
wire out_uop_taken; // @[util.scala:545:19]
wire out_uop_imm_rename; // @[util.scala:545:19]
wire [2:0] out_uop_imm_sel; // @[util.scala:545:19]
wire [4:0] out_uop_pimm; // @[util.scala:545:19]
wire [19:0] out_uop_imm_packed; // @[util.scala:545:19]
wire [1:0] out_uop_op1_sel; // @[util.scala:545:19]
wire [2:0] out_uop_op2_sel; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ldst; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wen; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren1; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren2; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren3; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap12; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap23; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagIn; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagOut; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fromint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_toint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fastpipe; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fma; // @[util.scala:545:19]
wire out_uop_fp_ctrl_div; // @[util.scala:545:19]
wire out_uop_fp_ctrl_sqrt; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wflags; // @[util.scala:545:19]
wire out_uop_fp_ctrl_vec; // @[util.scala:545:19]
wire [5:0] out_uop_rob_idx; // @[util.scala:545:19]
wire [3:0] out_uop_ldq_idx; // @[util.scala:545:19]
wire [3:0] out_uop_stq_idx; // @[util.scala:545:19]
wire [1:0] out_uop_rxq_idx; // @[util.scala:545:19]
wire [6:0] out_uop_pdst; // @[util.scala:545:19]
wire [6:0] out_uop_prs1; // @[util.scala:545:19]
wire [6:0] out_uop_prs2; // @[util.scala:545:19]
wire [6:0] out_uop_prs3; // @[util.scala:545:19]
wire [4:0] out_uop_ppred; // @[util.scala:545:19]
wire out_uop_prs1_busy; // @[util.scala:545:19]
wire out_uop_prs2_busy; // @[util.scala:545:19]
wire out_uop_prs3_busy; // @[util.scala:545:19]
wire out_uop_ppred_busy; // @[util.scala:545:19]
wire [6:0] out_uop_stale_pdst; // @[util.scala:545:19]
wire out_uop_exception; // @[util.scala:545:19]
wire [63:0] out_uop_exc_cause; // @[util.scala:545:19]
wire [4:0] out_uop_mem_cmd; // @[util.scala:545:19]
wire [1:0] out_uop_mem_size; // @[util.scala:545:19]
wire out_uop_mem_signed; // @[util.scala:545:19]
wire out_uop_uses_ldq; // @[util.scala:545:19]
wire out_uop_uses_stq; // @[util.scala:545:19]
wire out_uop_is_unique; // @[util.scala:545:19]
wire out_uop_flush_on_commit; // @[util.scala:545:19]
wire [2:0] out_uop_csr_cmd; // @[util.scala:545:19]
wire out_uop_ldst_is_rs1; // @[util.scala:545:19]
wire [5:0] out_uop_ldst; // @[util.scala:545:19]
wire [5:0] out_uop_lrs1; // @[util.scala:545:19]
wire [5:0] out_uop_lrs2; // @[util.scala:545:19]
wire [5:0] out_uop_lrs3; // @[util.scala:545:19]
wire [1:0] out_uop_dst_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs1_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs2_rtype; // @[util.scala:545:19]
wire out_uop_frs3_en; // @[util.scala:545:19]
wire out_uop_fcn_dw; // @[util.scala:545:19]
wire [4:0] out_uop_fcn_op; // @[util.scala:545:19]
wire out_uop_fp_val; // @[util.scala:545:19]
wire [2:0] out_uop_fp_rm; // @[util.scala:545:19]
wire [1:0] out_uop_fp_typ; // @[util.scala:545:19]
wire out_uop_xcpt_pf_if; // @[util.scala:545:19]
wire out_uop_xcpt_ae_if; // @[util.scala:545:19]
wire out_uop_xcpt_ma_if; // @[util.scala:545:19]
wire out_uop_bp_debug_if; // @[util.scala:545:19]
wire out_uop_bp_xcpt_if; // @[util.scala:545:19]
wire [2:0] out_uop_debug_fsrc; // @[util.scala:545:19]
wire [2:0] out_uop_debug_tsrc; // @[util.scala:545:19]
wire [64:0] out_data; // @[util.scala:545:19]
wire out_predicated; // @[util.scala:545:19]
wire out_fflags_valid; // @[util.scala:545:19]
wire [4:0] out_fflags_bits; // @[util.scala:545:19]
wire _io_empty_T_1; // @[util.scala:512:27]
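  // io_flush fans out to one copy per queue entry; these feed the per-entry valid-clear terms.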
wire _do_enq_T_6 = io_flush_0; // @[util.scala:458:7, :514:113]
wire _valids_0_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_1_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_2_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_3_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_4_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_5_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_6_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_7_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
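  // Module output nets; their drivers (dequeue-entry selection and the full/empty status logic)
  // appear further below in the module.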
wire io_enq_ready_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
wire [39:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
wire [11:0] io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
wire io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
wire io_deq_bits_uop_taken_0; // @[util.scala:458:7]
wire io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_exception_0; // @[util.scala:458:7]
wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
wire io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
wire io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
wire io_deq_bits_fflags_valid_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_fflags_bits_0; // @[util.scala:458:7]
wire [64:0] io_deq_bits_data_0; // @[util.scala:458:7]
wire io_deq_bits_predicated_0; // @[util.scala:458:7]
wire io_deq_valid_0; // @[util.scala:458:7]
wire io_empty; // @[util.scala:458:7]
wire [2:0] io_count_0; // @[util.scala:458:7]
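  // Unpack the 72-bit ram read word into the payload fields of the dequeue-side view:
  // bits [64:0] data, bit 65 predicated, bit 66 fflags_valid, bits [71:67] fflags_bits.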
assign out_data = _ram_ext_R0_data[64:0]; // @[util.scala:503:22, :545:19]
assign out_predicated = _ram_ext_R0_data[65]; // @[util.scala:503:22, :545:19]
assign out_fflags_valid = _ram_ext_R0_data[66]; // @[util.scala:503:22, :545:19]
assign out_fflags_bits = _ram_ext_R0_data[71:67]; // @[util.scala:503:22, :545:19]
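  // One valid flag per queue entry (8-entry queue).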
reg valids_0; // @[util.scala:504:26]
reg valids_1; // @[util.scala:504:26]
reg valids_2; // @[util.scala:504:26]
reg valids_3; // @[util.scala:504:26]
reg valids_4; // @[util.scala:504:26]
reg valids_5; // @[util.scala:504:26]
reg valids_6; // @[util.scala:504:26]
reg valids_7; // @[util.scala:504:26]
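  // Micro-op metadata for each of the 8 entries is held in discrete flops, one flattened field per
  // signal; only the payload word comes from the external ram macro read above.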
reg [31:0] uops_0_inst; // @[util.scala:505:22]
reg [31:0] uops_0_debug_inst; // @[util.scala:505:22]
reg uops_0_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_0_debug_pc; // @[util.scala:505:22]
reg uops_0_iq_type_0; // @[util.scala:505:22]
reg uops_0_iq_type_1; // @[util.scala:505:22]
reg uops_0_iq_type_2; // @[util.scala:505:22]
reg uops_0_iq_type_3; // @[util.scala:505:22]
reg uops_0_fu_code_0; // @[util.scala:505:22]
reg uops_0_fu_code_1; // @[util.scala:505:22]
reg uops_0_fu_code_2; // @[util.scala:505:22]
reg uops_0_fu_code_3; // @[util.scala:505:22]
reg uops_0_fu_code_4; // @[util.scala:505:22]
reg uops_0_fu_code_5; // @[util.scala:505:22]
reg uops_0_fu_code_6; // @[util.scala:505:22]
reg uops_0_fu_code_7; // @[util.scala:505:22]
reg uops_0_fu_code_8; // @[util.scala:505:22]
reg uops_0_fu_code_9; // @[util.scala:505:22]
reg uops_0_iw_issued; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [1:0] uops_0_iw_p1_speculative_child; // @[util.scala:505:22]
reg [1:0] uops_0_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_0_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [1:0] uops_0_dis_col_sel; // @[util.scala:505:22]
reg [11:0] uops_0_br_mask; // @[util.scala:505:22]
reg [3:0] uops_0_br_tag; // @[util.scala:505:22]
reg [3:0] uops_0_br_type; // @[util.scala:505:22]
reg uops_0_is_sfb; // @[util.scala:505:22]
reg uops_0_is_fence; // @[util.scala:505:22]
reg uops_0_is_fencei; // @[util.scala:505:22]
reg uops_0_is_sfence; // @[util.scala:505:22]
reg uops_0_is_amo; // @[util.scala:505:22]
reg uops_0_is_eret; // @[util.scala:505:22]
reg uops_0_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_0_is_rocc; // @[util.scala:505:22]
reg uops_0_is_mov; // @[util.scala:505:22]
reg [4:0] uops_0_ftq_idx; // @[util.scala:505:22]
reg uops_0_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_0_pc_lob; // @[util.scala:505:22]
reg uops_0_taken; // @[util.scala:505:22]
reg uops_0_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_0_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_0_pimm; // @[util.scala:505:22]
reg [19:0] uops_0_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_0_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_0_op2_sel; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_0_fp_ctrl_div; // @[util.scala:505:22]
reg uops_0_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_0_fp_ctrl_vec; // @[util.scala:505:22]
reg [5:0] uops_0_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_0_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_0_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_0_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_0_pdst; // @[util.scala:505:22]
reg [6:0] uops_0_prs1; // @[util.scala:505:22]
reg [6:0] uops_0_prs2; // @[util.scala:505:22]
reg [6:0] uops_0_prs3; // @[util.scala:505:22]
reg [4:0] uops_0_ppred; // @[util.scala:505:22]
reg uops_0_prs1_busy; // @[util.scala:505:22]
reg uops_0_prs2_busy; // @[util.scala:505:22]
reg uops_0_prs3_busy; // @[util.scala:505:22]
reg uops_0_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_0_stale_pdst; // @[util.scala:505:22]
reg uops_0_exception; // @[util.scala:505:22]
reg [63:0] uops_0_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_0_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_0_mem_size; // @[util.scala:505:22]
reg uops_0_mem_signed; // @[util.scala:505:22]
reg uops_0_uses_ldq; // @[util.scala:505:22]
reg uops_0_uses_stq; // @[util.scala:505:22]
reg uops_0_is_unique; // @[util.scala:505:22]
reg uops_0_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_0_csr_cmd; // @[util.scala:505:22]
reg uops_0_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_0_ldst; // @[util.scala:505:22]
reg [5:0] uops_0_lrs1; // @[util.scala:505:22]
reg [5:0] uops_0_lrs2; // @[util.scala:505:22]
reg [5:0] uops_0_lrs3; // @[util.scala:505:22]
reg [1:0] uops_0_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs2_rtype; // @[util.scala:505:22]
reg uops_0_frs3_en; // @[util.scala:505:22]
reg uops_0_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_0_fcn_op; // @[util.scala:505:22]
reg uops_0_fp_val; // @[util.scala:505:22]
reg [2:0] uops_0_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_0_fp_typ; // @[util.scala:505:22]
reg uops_0_xcpt_pf_if; // @[util.scala:505:22]
reg uops_0_xcpt_ae_if; // @[util.scala:505:22]
reg uops_0_xcpt_ma_if; // @[util.scala:505:22]
reg uops_0_bp_debug_if; // @[util.scala:505:22]
reg uops_0_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_0_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_0_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_1_inst; // @[util.scala:505:22]
reg [31:0] uops_1_debug_inst; // @[util.scala:505:22]
reg uops_1_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_1_debug_pc; // @[util.scala:505:22]
reg uops_1_iq_type_0; // @[util.scala:505:22]
reg uops_1_iq_type_1; // @[util.scala:505:22]
reg uops_1_iq_type_2; // @[util.scala:505:22]
reg uops_1_iq_type_3; // @[util.scala:505:22]
reg uops_1_fu_code_0; // @[util.scala:505:22]
reg uops_1_fu_code_1; // @[util.scala:505:22]
reg uops_1_fu_code_2; // @[util.scala:505:22]
reg uops_1_fu_code_3; // @[util.scala:505:22]
reg uops_1_fu_code_4; // @[util.scala:505:22]
reg uops_1_fu_code_5; // @[util.scala:505:22]
reg uops_1_fu_code_6; // @[util.scala:505:22]
reg uops_1_fu_code_7; // @[util.scala:505:22]
reg uops_1_fu_code_8; // @[util.scala:505:22]
reg uops_1_fu_code_9; // @[util.scala:505:22]
reg uops_1_iw_issued; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [1:0] uops_1_iw_p1_speculative_child; // @[util.scala:505:22]
reg [1:0] uops_1_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_1_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [1:0] uops_1_dis_col_sel; // @[util.scala:505:22]
reg [11:0] uops_1_br_mask; // @[util.scala:505:22]
reg [3:0] uops_1_br_tag; // @[util.scala:505:22]
reg [3:0] uops_1_br_type; // @[util.scala:505:22]
reg uops_1_is_sfb; // @[util.scala:505:22]
reg uops_1_is_fence; // @[util.scala:505:22]
reg uops_1_is_fencei; // @[util.scala:505:22]
reg uops_1_is_sfence; // @[util.scala:505:22]
reg uops_1_is_amo; // @[util.scala:505:22]
reg uops_1_is_eret; // @[util.scala:505:22]
reg uops_1_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_1_is_rocc; // @[util.scala:505:22]
reg uops_1_is_mov; // @[util.scala:505:22]
reg [4:0] uops_1_ftq_idx; // @[util.scala:505:22]
reg uops_1_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_1_pc_lob; // @[util.scala:505:22]
reg uops_1_taken; // @[util.scala:505:22]
reg uops_1_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_1_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_1_pimm; // @[util.scala:505:22]
reg [19:0] uops_1_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_1_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_1_op2_sel; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_1_fp_ctrl_div; // @[util.scala:505:22]
reg uops_1_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_1_fp_ctrl_vec; // @[util.scala:505:22]
reg [5:0] uops_1_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_1_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_1_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_1_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_1_pdst; // @[util.scala:505:22]
reg [6:0] uops_1_prs1; // @[util.scala:505:22]
reg [6:0] uops_1_prs2; // @[util.scala:505:22]
reg [6:0] uops_1_prs3; // @[util.scala:505:22]
reg [4:0] uops_1_ppred; // @[util.scala:505:22]
reg uops_1_prs1_busy; // @[util.scala:505:22]
reg uops_1_prs2_busy; // @[util.scala:505:22]
reg uops_1_prs3_busy; // @[util.scala:505:22]
reg uops_1_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_1_stale_pdst; // @[util.scala:505:22]
reg uops_1_exception; // @[util.scala:505:22]
reg [63:0] uops_1_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_1_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_1_mem_size; // @[util.scala:505:22]
reg uops_1_mem_signed; // @[util.scala:505:22]
reg uops_1_uses_ldq; // @[util.scala:505:22]
reg uops_1_uses_stq; // @[util.scala:505:22]
reg uops_1_is_unique; // @[util.scala:505:22]
reg uops_1_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_1_csr_cmd; // @[util.scala:505:22]
reg uops_1_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_1_ldst; // @[util.scala:505:22]
reg [5:0] uops_1_lrs1; // @[util.scala:505:22]
reg [5:0] uops_1_lrs2; // @[util.scala:505:22]
reg [5:0] uops_1_lrs3; // @[util.scala:505:22]
reg [1:0] uops_1_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs2_rtype; // @[util.scala:505:22]
reg uops_1_frs3_en; // @[util.scala:505:22]
reg uops_1_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_1_fcn_op; // @[util.scala:505:22]
reg uops_1_fp_val; // @[util.scala:505:22]
reg [2:0] uops_1_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_1_fp_typ; // @[util.scala:505:22]
reg uops_1_xcpt_pf_if; // @[util.scala:505:22]
reg uops_1_xcpt_ae_if; // @[util.scala:505:22]
reg uops_1_xcpt_ma_if; // @[util.scala:505:22]
reg uops_1_bp_debug_if; // @[util.scala:505:22]
reg uops_1_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_1_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_1_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_2_inst; // @[util.scala:505:22]
reg [31:0] uops_2_debug_inst; // @[util.scala:505:22]
reg uops_2_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_2_debug_pc; // @[util.scala:505:22]
reg uops_2_iq_type_0; // @[util.scala:505:22]
reg uops_2_iq_type_1; // @[util.scala:505:22]
reg uops_2_iq_type_2; // @[util.scala:505:22]
reg uops_2_iq_type_3; // @[util.scala:505:22]
reg uops_2_fu_code_0; // @[util.scala:505:22]
reg uops_2_fu_code_1; // @[util.scala:505:22]
reg uops_2_fu_code_2; // @[util.scala:505:22]
reg uops_2_fu_code_3; // @[util.scala:505:22]
reg uops_2_fu_code_4; // @[util.scala:505:22]
reg uops_2_fu_code_5; // @[util.scala:505:22]
reg uops_2_fu_code_6; // @[util.scala:505:22]
reg uops_2_fu_code_7; // @[util.scala:505:22]
reg uops_2_fu_code_8; // @[util.scala:505:22]
reg uops_2_fu_code_9; // @[util.scala:505:22]
reg uops_2_iw_issued; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [1:0] uops_2_iw_p1_speculative_child; // @[util.scala:505:22]
reg [1:0] uops_2_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_2_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [1:0] uops_2_dis_col_sel; // @[util.scala:505:22]
reg [11:0] uops_2_br_mask; // @[util.scala:505:22]
reg [3:0] uops_2_br_tag; // @[util.scala:505:22]
reg [3:0] uops_2_br_type; // @[util.scala:505:22]
reg uops_2_is_sfb; // @[util.scala:505:22]
reg uops_2_is_fence; // @[util.scala:505:22]
reg uops_2_is_fencei; // @[util.scala:505:22]
reg uops_2_is_sfence; // @[util.scala:505:22]
reg uops_2_is_amo; // @[util.scala:505:22]
reg uops_2_is_eret; // @[util.scala:505:22]
reg uops_2_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_2_is_rocc; // @[util.scala:505:22]
reg uops_2_is_mov; // @[util.scala:505:22]
reg [4:0] uops_2_ftq_idx; // @[util.scala:505:22]
reg uops_2_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_2_pc_lob; // @[util.scala:505:22]
reg uops_2_taken; // @[util.scala:505:22]
reg uops_2_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_2_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_2_pimm; // @[util.scala:505:22]
reg [19:0] uops_2_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_2_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_2_op2_sel; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_2_fp_ctrl_div; // @[util.scala:505:22]
reg uops_2_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_2_fp_ctrl_vec; // @[util.scala:505:22]
reg [5:0] uops_2_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_2_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_2_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_2_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_2_pdst; // @[util.scala:505:22]
reg [6:0] uops_2_prs1; // @[util.scala:505:22]
reg [6:0] uops_2_prs2; // @[util.scala:505:22]
reg [6:0] uops_2_prs3; // @[util.scala:505:22]
reg [4:0] uops_2_ppred; // @[util.scala:505:22]
reg uops_2_prs1_busy; // @[util.scala:505:22]
reg uops_2_prs2_busy; // @[util.scala:505:22]
reg uops_2_prs3_busy; // @[util.scala:505:22]
reg uops_2_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_2_stale_pdst; // @[util.scala:505:22]
reg uops_2_exception; // @[util.scala:505:22]
reg [63:0] uops_2_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_2_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_2_mem_size; // @[util.scala:505:22]
reg uops_2_mem_signed; // @[util.scala:505:22]
reg uops_2_uses_ldq; // @[util.scala:505:22]
reg uops_2_uses_stq; // @[util.scala:505:22]
reg uops_2_is_unique; // @[util.scala:505:22]
reg uops_2_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_2_csr_cmd; // @[util.scala:505:22]
reg uops_2_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_2_ldst; // @[util.scala:505:22]
reg [5:0] uops_2_lrs1; // @[util.scala:505:22]
reg [5:0] uops_2_lrs2; // @[util.scala:505:22]
reg [5:0] uops_2_lrs3; // @[util.scala:505:22]
reg [1:0] uops_2_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs2_rtype; // @[util.scala:505:22]
reg uops_2_frs3_en; // @[util.scala:505:22]
reg uops_2_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_2_fcn_op; // @[util.scala:505:22]
reg uops_2_fp_val; // @[util.scala:505:22]
reg [2:0] uops_2_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_2_fp_typ; // @[util.scala:505:22]
reg uops_2_xcpt_pf_if; // @[util.scala:505:22]
reg uops_2_xcpt_ae_if; // @[util.scala:505:22]
reg uops_2_xcpt_ma_if; // @[util.scala:505:22]
reg uops_2_bp_debug_if; // @[util.scala:505:22]
reg uops_2_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_2_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_2_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_3_inst; // @[util.scala:505:22]
reg [31:0] uops_3_debug_inst; // @[util.scala:505:22]
reg uops_3_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_3_debug_pc; // @[util.scala:505:22]
reg uops_3_iq_type_0; // @[util.scala:505:22]
reg uops_3_iq_type_1; // @[util.scala:505:22]
reg uops_3_iq_type_2; // @[util.scala:505:22]
reg uops_3_iq_type_3; // @[util.scala:505:22]
reg uops_3_fu_code_0; // @[util.scala:505:22]
reg uops_3_fu_code_1; // @[util.scala:505:22]
reg uops_3_fu_code_2; // @[util.scala:505:22]
reg uops_3_fu_code_3; // @[util.scala:505:22]
reg uops_3_fu_code_4; // @[util.scala:505:22]
reg uops_3_fu_code_5; // @[util.scala:505:22]
reg uops_3_fu_code_6; // @[util.scala:505:22]
reg uops_3_fu_code_7; // @[util.scala:505:22]
reg uops_3_fu_code_8; // @[util.scala:505:22]
reg uops_3_fu_code_9; // @[util.scala:505:22]
reg uops_3_iw_issued; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [1:0] uops_3_iw_p1_speculative_child; // @[util.scala:505:22]
reg [1:0] uops_3_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_3_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [1:0] uops_3_dis_col_sel; // @[util.scala:505:22]
reg [11:0] uops_3_br_mask; // @[util.scala:505:22]
reg [3:0] uops_3_br_tag; // @[util.scala:505:22]
reg [3:0] uops_3_br_type; // @[util.scala:505:22]
reg uops_3_is_sfb; // @[util.scala:505:22]
reg uops_3_is_fence; // @[util.scala:505:22]
reg uops_3_is_fencei; // @[util.scala:505:22]
reg uops_3_is_sfence; // @[util.scala:505:22]
reg uops_3_is_amo; // @[util.scala:505:22]
reg uops_3_is_eret; // @[util.scala:505:22]
reg uops_3_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_3_is_rocc; // @[util.scala:505:22]
reg uops_3_is_mov; // @[util.scala:505:22]
reg [4:0] uops_3_ftq_idx; // @[util.scala:505:22]
reg uops_3_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_3_pc_lob; // @[util.scala:505:22]
reg uops_3_taken; // @[util.scala:505:22]
reg uops_3_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_3_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_3_pimm; // @[util.scala:505:22]
reg [19:0] uops_3_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_3_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_3_op2_sel; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_3_fp_ctrl_div; // @[util.scala:505:22]
reg uops_3_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_3_fp_ctrl_vec; // @[util.scala:505:22]
reg [5:0] uops_3_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_3_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_3_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_3_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_3_pdst; // @[util.scala:505:22]
reg [6:0] uops_3_prs1; // @[util.scala:505:22]
reg [6:0] uops_3_prs2; // @[util.scala:505:22]
reg [6:0] uops_3_prs3; // @[util.scala:505:22]
reg [4:0] uops_3_ppred; // @[util.scala:505:22]
reg uops_3_prs1_busy; // @[util.scala:505:22]
reg uops_3_prs2_busy; // @[util.scala:505:22]
reg uops_3_prs3_busy; // @[util.scala:505:22]
reg uops_3_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_3_stale_pdst; // @[util.scala:505:22]
reg uops_3_exception; // @[util.scala:505:22]
reg [63:0] uops_3_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_3_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_3_mem_size; // @[util.scala:505:22]
reg uops_3_mem_signed; // @[util.scala:505:22]
reg uops_3_uses_ldq; // @[util.scala:505:22]
reg uops_3_uses_stq; // @[util.scala:505:22]
reg uops_3_is_unique; // @[util.scala:505:22]
reg uops_3_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_3_csr_cmd; // @[util.scala:505:22]
reg uops_3_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_3_ldst; // @[util.scala:505:22]
reg [5:0] uops_3_lrs1; // @[util.scala:505:22]
reg [5:0] uops_3_lrs2; // @[util.scala:505:22]
reg [5:0] uops_3_lrs3; // @[util.scala:505:22]
reg [1:0] uops_3_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs2_rtype; // @[util.scala:505:22]
reg uops_3_frs3_en; // @[util.scala:505:22]
reg uops_3_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_3_fcn_op; // @[util.scala:505:22]
reg uops_3_fp_val; // @[util.scala:505:22]
reg [2:0] uops_3_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_3_fp_typ; // @[util.scala:505:22]
reg uops_3_xcpt_pf_if; // @[util.scala:505:22]
reg uops_3_xcpt_ae_if; // @[util.scala:505:22]
reg uops_3_xcpt_ma_if; // @[util.scala:505:22]
reg uops_3_bp_debug_if; // @[util.scala:505:22]
reg uops_3_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_3_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_3_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_4_inst; // @[util.scala:505:22]
reg [31:0] uops_4_debug_inst; // @[util.scala:505:22]
reg uops_4_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_4_debug_pc; // @[util.scala:505:22]
reg uops_4_iq_type_0; // @[util.scala:505:22]
reg uops_4_iq_type_1; // @[util.scala:505:22]
reg uops_4_iq_type_2; // @[util.scala:505:22]
reg uops_4_iq_type_3; // @[util.scala:505:22]
reg uops_4_fu_code_0; // @[util.scala:505:22]
reg uops_4_fu_code_1; // @[util.scala:505:22]
reg uops_4_fu_code_2; // @[util.scala:505:22]
reg uops_4_fu_code_3; // @[util.scala:505:22]
reg uops_4_fu_code_4; // @[util.scala:505:22]
reg uops_4_fu_code_5; // @[util.scala:505:22]
reg uops_4_fu_code_6; // @[util.scala:505:22]
reg uops_4_fu_code_7; // @[util.scala:505:22]
reg uops_4_fu_code_8; // @[util.scala:505:22]
reg uops_4_fu_code_9; // @[util.scala:505:22]
reg uops_4_iw_issued; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [1:0] uops_4_iw_p1_speculative_child; // @[util.scala:505:22]
reg [1:0] uops_4_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_4_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [1:0] uops_4_dis_col_sel; // @[util.scala:505:22]
reg [11:0] uops_4_br_mask; // @[util.scala:505:22]
reg [3:0] uops_4_br_tag; // @[util.scala:505:22]
reg [3:0] uops_4_br_type; // @[util.scala:505:22]
reg uops_4_is_sfb; // @[util.scala:505:22]
reg uops_4_is_fence; // @[util.scala:505:22]
reg uops_4_is_fencei; // @[util.scala:505:22]
reg uops_4_is_sfence; // @[util.scala:505:22]
reg uops_4_is_amo; // @[util.scala:505:22]
reg uops_4_is_eret; // @[util.scala:505:22]
reg uops_4_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_4_is_rocc; // @[util.scala:505:22]
reg uops_4_is_mov; // @[util.scala:505:22]
reg [4:0] uops_4_ftq_idx; // @[util.scala:505:22]
reg uops_4_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_4_pc_lob; // @[util.scala:505:22]
reg uops_4_taken; // @[util.scala:505:22]
reg uops_4_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_4_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_4_pimm; // @[util.scala:505:22]
reg [19:0] uops_4_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_4_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_4_op2_sel; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_4_fp_ctrl_div; // @[util.scala:505:22]
reg uops_4_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_4_fp_ctrl_vec; // @[util.scala:505:22]
reg [5:0] uops_4_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_4_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_4_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_4_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_4_pdst; // @[util.scala:505:22]
reg [6:0] uops_4_prs1; // @[util.scala:505:22]
reg [6:0] uops_4_prs2; // @[util.scala:505:22]
reg [6:0] uops_4_prs3; // @[util.scala:505:22]
reg [4:0] uops_4_ppred; // @[util.scala:505:22]
reg uops_4_prs1_busy; // @[util.scala:505:22]
reg uops_4_prs2_busy; // @[util.scala:505:22]
reg uops_4_prs3_busy; // @[util.scala:505:22]
reg uops_4_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_4_stale_pdst; // @[util.scala:505:22]
reg uops_4_exception; // @[util.scala:505:22]
reg [63:0] uops_4_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_4_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_4_mem_size; // @[util.scala:505:22]
reg uops_4_mem_signed; // @[util.scala:505:22]
reg uops_4_uses_ldq; // @[util.scala:505:22]
reg uops_4_uses_stq; // @[util.scala:505:22]
reg uops_4_is_unique; // @[util.scala:505:22]
reg uops_4_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_4_csr_cmd; // @[util.scala:505:22]
reg uops_4_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_4_ldst; // @[util.scala:505:22]
reg [5:0] uops_4_lrs1; // @[util.scala:505:22]
reg [5:0] uops_4_lrs2; // @[util.scala:505:22]
reg [5:0] uops_4_lrs3; // @[util.scala:505:22]
reg [1:0] uops_4_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs2_rtype; // @[util.scala:505:22]
reg uops_4_frs3_en; // @[util.scala:505:22]
reg uops_4_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_4_fcn_op; // @[util.scala:505:22]
reg uops_4_fp_val; // @[util.scala:505:22]
reg [2:0] uops_4_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_4_fp_typ; // @[util.scala:505:22]
reg uops_4_xcpt_pf_if; // @[util.scala:505:22]
reg uops_4_xcpt_ae_if; // @[util.scala:505:22]
reg uops_4_xcpt_ma_if; // @[util.scala:505:22]
reg uops_4_bp_debug_if; // @[util.scala:505:22]
reg uops_4_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_4_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_4_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_5_inst; // @[util.scala:505:22]
reg [31:0] uops_5_debug_inst; // @[util.scala:505:22]
reg uops_5_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_5_debug_pc; // @[util.scala:505:22]
reg uops_5_iq_type_0; // @[util.scala:505:22]
reg uops_5_iq_type_1; // @[util.scala:505:22]
reg uops_5_iq_type_2; // @[util.scala:505:22]
reg uops_5_iq_type_3; // @[util.scala:505:22]
reg uops_5_fu_code_0; // @[util.scala:505:22]
reg uops_5_fu_code_1; // @[util.scala:505:22]
reg uops_5_fu_code_2; // @[util.scala:505:22]
reg uops_5_fu_code_3; // @[util.scala:505:22]
reg uops_5_fu_code_4; // @[util.scala:505:22]
reg uops_5_fu_code_5; // @[util.scala:505:22]
reg uops_5_fu_code_6; // @[util.scala:505:22]
reg uops_5_fu_code_7; // @[util.scala:505:22]
reg uops_5_fu_code_8; // @[util.scala:505:22]
reg uops_5_fu_code_9; // @[util.scala:505:22]
reg uops_5_iw_issued; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [1:0] uops_5_iw_p1_speculative_child; // @[util.scala:505:22]
reg [1:0] uops_5_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_5_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [1:0] uops_5_dis_col_sel; // @[util.scala:505:22]
reg [11:0] uops_5_br_mask; // @[util.scala:505:22]
reg [3:0] uops_5_br_tag; // @[util.scala:505:22]
reg [3:0] uops_5_br_type; // @[util.scala:505:22]
reg uops_5_is_sfb; // @[util.scala:505:22]
reg uops_5_is_fence; // @[util.scala:505:22]
reg uops_5_is_fencei; // @[util.scala:505:22]
reg uops_5_is_sfence; // @[util.scala:505:22]
reg uops_5_is_amo; // @[util.scala:505:22]
reg uops_5_is_eret; // @[util.scala:505:22]
reg uops_5_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_5_is_rocc; // @[util.scala:505:22]
reg uops_5_is_mov; // @[util.scala:505:22]
reg [4:0] uops_5_ftq_idx; // @[util.scala:505:22]
reg uops_5_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_5_pc_lob; // @[util.scala:505:22]
reg uops_5_taken; // @[util.scala:505:22]
reg uops_5_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_5_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_5_pimm; // @[util.scala:505:22]
reg [19:0] uops_5_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_5_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_5_op2_sel; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_5_fp_ctrl_div; // @[util.scala:505:22]
reg uops_5_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_5_fp_ctrl_vec; // @[util.scala:505:22]
reg [5:0] uops_5_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_5_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_5_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_5_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_5_pdst; // @[util.scala:505:22]
reg [6:0] uops_5_prs1; // @[util.scala:505:22]
reg [6:0] uops_5_prs2; // @[util.scala:505:22]
reg [6:0] uops_5_prs3; // @[util.scala:505:22]
reg [4:0] uops_5_ppred; // @[util.scala:505:22]
reg uops_5_prs1_busy; // @[util.scala:505:22]
reg uops_5_prs2_busy; // @[util.scala:505:22]
reg uops_5_prs3_busy; // @[util.scala:505:22]
reg uops_5_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_5_stale_pdst; // @[util.scala:505:22]
reg uops_5_exception; // @[util.scala:505:22]
reg [63:0] uops_5_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_5_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_5_mem_size; // @[util.scala:505:22]
reg uops_5_mem_signed; // @[util.scala:505:22]
reg uops_5_uses_ldq; // @[util.scala:505:22]
reg uops_5_uses_stq; // @[util.scala:505:22]
reg uops_5_is_unique; // @[util.scala:505:22]
reg uops_5_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_5_csr_cmd; // @[util.scala:505:22]
reg uops_5_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_5_ldst; // @[util.scala:505:22]
reg [5:0] uops_5_lrs1; // @[util.scala:505:22]
reg [5:0] uops_5_lrs2; // @[util.scala:505:22]
reg [5:0] uops_5_lrs3; // @[util.scala:505:22]
reg [1:0] uops_5_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs2_rtype; // @[util.scala:505:22]
reg uops_5_frs3_en; // @[util.scala:505:22]
reg uops_5_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_5_fcn_op; // @[util.scala:505:22]
reg uops_5_fp_val; // @[util.scala:505:22]
reg [2:0] uops_5_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_5_fp_typ; // @[util.scala:505:22]
reg uops_5_xcpt_pf_if; // @[util.scala:505:22]
reg uops_5_xcpt_ae_if; // @[util.scala:505:22]
reg uops_5_xcpt_ma_if; // @[util.scala:505:22]
reg uops_5_bp_debug_if; // @[util.scala:505:22]
reg uops_5_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_5_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_5_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_6_inst; // @[util.scala:505:22]
reg [31:0] uops_6_debug_inst; // @[util.scala:505:22]
reg uops_6_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_6_debug_pc; // @[util.scala:505:22]
reg uops_6_iq_type_0; // @[util.scala:505:22]
reg uops_6_iq_type_1; // @[util.scala:505:22]
reg uops_6_iq_type_2; // @[util.scala:505:22]
reg uops_6_iq_type_3; // @[util.scala:505:22]
reg uops_6_fu_code_0; // @[util.scala:505:22]
reg uops_6_fu_code_1; // @[util.scala:505:22]
reg uops_6_fu_code_2; // @[util.scala:505:22]
reg uops_6_fu_code_3; // @[util.scala:505:22]
reg uops_6_fu_code_4; // @[util.scala:505:22]
reg uops_6_fu_code_5; // @[util.scala:505:22]
reg uops_6_fu_code_6; // @[util.scala:505:22]
reg uops_6_fu_code_7; // @[util.scala:505:22]
reg uops_6_fu_code_8; // @[util.scala:505:22]
reg uops_6_fu_code_9; // @[util.scala:505:22]
reg uops_6_iw_issued; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [1:0] uops_6_iw_p1_speculative_child; // @[util.scala:505:22]
reg [1:0] uops_6_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_6_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [1:0] uops_6_dis_col_sel; // @[util.scala:505:22]
reg [11:0] uops_6_br_mask; // @[util.scala:505:22]
reg [3:0] uops_6_br_tag; // @[util.scala:505:22]
reg [3:0] uops_6_br_type; // @[util.scala:505:22]
reg uops_6_is_sfb; // @[util.scala:505:22]
reg uops_6_is_fence; // @[util.scala:505:22]
reg uops_6_is_fencei; // @[util.scala:505:22]
reg uops_6_is_sfence; // @[util.scala:505:22]
reg uops_6_is_amo; // @[util.scala:505:22]
reg uops_6_is_eret; // @[util.scala:505:22]
reg uops_6_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_6_is_rocc; // @[util.scala:505:22]
reg uops_6_is_mov; // @[util.scala:505:22]
reg [4:0] uops_6_ftq_idx; // @[util.scala:505:22]
reg uops_6_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_6_pc_lob; // @[util.scala:505:22]
reg uops_6_taken; // @[util.scala:505:22]
reg uops_6_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_6_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_6_pimm; // @[util.scala:505:22]
reg [19:0] uops_6_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_6_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_6_op2_sel; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_6_fp_ctrl_div; // @[util.scala:505:22]
reg uops_6_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_6_fp_ctrl_vec; // @[util.scala:505:22]
reg [5:0] uops_6_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_6_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_6_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_6_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_6_pdst; // @[util.scala:505:22]
reg [6:0] uops_6_prs1; // @[util.scala:505:22]
reg [6:0] uops_6_prs2; // @[util.scala:505:22]
reg [6:0] uops_6_prs3; // @[util.scala:505:22]
reg [4:0] uops_6_ppred; // @[util.scala:505:22]
reg uops_6_prs1_busy; // @[util.scala:505:22]
reg uops_6_prs2_busy; // @[util.scala:505:22]
reg uops_6_prs3_busy; // @[util.scala:505:22]
reg uops_6_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_6_stale_pdst; // @[util.scala:505:22]
reg uops_6_exception; // @[util.scala:505:22]
reg [63:0] uops_6_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_6_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_6_mem_size; // @[util.scala:505:22]
reg uops_6_mem_signed; // @[util.scala:505:22]
reg uops_6_uses_ldq; // @[util.scala:505:22]
reg uops_6_uses_stq; // @[util.scala:505:22]
reg uops_6_is_unique; // @[util.scala:505:22]
reg uops_6_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_6_csr_cmd; // @[util.scala:505:22]
reg uops_6_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_6_ldst; // @[util.scala:505:22]
reg [5:0] uops_6_lrs1; // @[util.scala:505:22]
reg [5:0] uops_6_lrs2; // @[util.scala:505:22]
reg [5:0] uops_6_lrs3; // @[util.scala:505:22]
reg [1:0] uops_6_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs2_rtype; // @[util.scala:505:22]
reg uops_6_frs3_en; // @[util.scala:505:22]
reg uops_6_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_6_fcn_op; // @[util.scala:505:22]
reg uops_6_fp_val; // @[util.scala:505:22]
reg [2:0] uops_6_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_6_fp_typ; // @[util.scala:505:22]
reg uops_6_xcpt_pf_if; // @[util.scala:505:22]
reg uops_6_xcpt_ae_if; // @[util.scala:505:22]
reg uops_6_xcpt_ma_if; // @[util.scala:505:22]
reg uops_6_bp_debug_if; // @[util.scala:505:22]
reg uops_6_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_6_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_6_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_7_inst; // @[util.scala:505:22]
reg [31:0] uops_7_debug_inst; // @[util.scala:505:22]
reg uops_7_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_7_debug_pc; // @[util.scala:505:22]
reg uops_7_iq_type_0; // @[util.scala:505:22]
reg uops_7_iq_type_1; // @[util.scala:505:22]
reg uops_7_iq_type_2; // @[util.scala:505:22]
reg uops_7_iq_type_3; // @[util.scala:505:22]
reg uops_7_fu_code_0; // @[util.scala:505:22]
reg uops_7_fu_code_1; // @[util.scala:505:22]
reg uops_7_fu_code_2; // @[util.scala:505:22]
reg uops_7_fu_code_3; // @[util.scala:505:22]
reg uops_7_fu_code_4; // @[util.scala:505:22]
reg uops_7_fu_code_5; // @[util.scala:505:22]
reg uops_7_fu_code_6; // @[util.scala:505:22]
reg uops_7_fu_code_7; // @[util.scala:505:22]
reg uops_7_fu_code_8; // @[util.scala:505:22]
reg uops_7_fu_code_9; // @[util.scala:505:22]
reg uops_7_iw_issued; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [1:0] uops_7_iw_p1_speculative_child; // @[util.scala:505:22]
reg [1:0] uops_7_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_7_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [1:0] uops_7_dis_col_sel; // @[util.scala:505:22]
reg [11:0] uops_7_br_mask; // @[util.scala:505:22]
reg [3:0] uops_7_br_tag; // @[util.scala:505:22]
reg [3:0] uops_7_br_type; // @[util.scala:505:22]
reg uops_7_is_sfb; // @[util.scala:505:22]
reg uops_7_is_fence; // @[util.scala:505:22]
reg uops_7_is_fencei; // @[util.scala:505:22]
reg uops_7_is_sfence; // @[util.scala:505:22]
reg uops_7_is_amo; // @[util.scala:505:22]
reg uops_7_is_eret; // @[util.scala:505:22]
reg uops_7_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_7_is_rocc; // @[util.scala:505:22]
reg uops_7_is_mov; // @[util.scala:505:22]
reg [4:0] uops_7_ftq_idx; // @[util.scala:505:22]
reg uops_7_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_7_pc_lob; // @[util.scala:505:22]
reg uops_7_taken; // @[util.scala:505:22]
reg uops_7_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_7_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_7_pimm; // @[util.scala:505:22]
reg [19:0] uops_7_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_7_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_7_op2_sel; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_7_fp_ctrl_div; // @[util.scala:505:22]
reg uops_7_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_7_fp_ctrl_vec; // @[util.scala:505:22]
reg [5:0] uops_7_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_7_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_7_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_7_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_7_pdst; // @[util.scala:505:22]
reg [6:0] uops_7_prs1; // @[util.scala:505:22]
reg [6:0] uops_7_prs2; // @[util.scala:505:22]
reg [6:0] uops_7_prs3; // @[util.scala:505:22]
reg [4:0] uops_7_ppred; // @[util.scala:505:22]
reg uops_7_prs1_busy; // @[util.scala:505:22]
reg uops_7_prs2_busy; // @[util.scala:505:22]
reg uops_7_prs3_busy; // @[util.scala:505:22]
reg uops_7_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_7_stale_pdst; // @[util.scala:505:22]
reg uops_7_exception; // @[util.scala:505:22]
reg [63:0] uops_7_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_7_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_7_mem_size; // @[util.scala:505:22]
reg uops_7_mem_signed; // @[util.scala:505:22]
reg uops_7_uses_ldq; // @[util.scala:505:22]
reg uops_7_uses_stq; // @[util.scala:505:22]
reg uops_7_is_unique; // @[util.scala:505:22]
reg uops_7_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_7_csr_cmd; // @[util.scala:505:22]
reg uops_7_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_7_ldst; // @[util.scala:505:22]
reg [5:0] uops_7_lrs1; // @[util.scala:505:22]
reg [5:0] uops_7_lrs2; // @[util.scala:505:22]
reg [5:0] uops_7_lrs3; // @[util.scala:505:22]
reg [1:0] uops_7_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs2_rtype; // @[util.scala:505:22]
reg uops_7_frs3_en; // @[util.scala:505:22]
reg uops_7_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_7_fcn_op; // @[util.scala:505:22]
reg uops_7_fp_val; // @[util.scala:505:22]
reg [2:0] uops_7_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_7_fp_typ; // @[util.scala:505:22]
reg uops_7_xcpt_pf_if; // @[util.scala:505:22]
reg uops_7_xcpt_ae_if; // @[util.scala:505:22]
reg uops_7_xcpt_ma_if; // @[util.scala:505:22]
reg uops_7_bp_debug_if; // @[util.scala:505:22]
reg uops_7_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_7_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_7_debug_tsrc; // @[util.scala:505:22]
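  // Queue bookkeeping: wrapping enqueue/dequeue pointers (from Counter.scala) plus
  // maybe_full, which disambiguates the full case from the empty case when the two
  // pointers are equal.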
reg [2:0] enq_ptr_value; // @[Counter.scala:61:40]
reg [2:0] deq_ptr_value; // @[Counter.scala:61:40]
reg maybe_full; // @[util.scala:509:29]
wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40]
wire _io_empty_T = ~maybe_full; // @[util.scala:509:29, :512:30]
assign _io_empty_T_1 = ptr_match & _io_empty_T; // @[util.scala:511:35, :512:{27,30}]
assign io_empty = _io_empty_T_1; // @[util.scala:458:7, :512:27]
wire _GEN = ptr_match & maybe_full; // @[util.scala:509:29, :511:35, :513:26]
wire full; // @[util.scala:513:26]
assign full = _GEN; // @[util.scala:513:26]
wire _io_count_T; // @[util.scala:553:34]
assign _io_count_T = _GEN; // @[util.scala:513:26, :553:34]
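  // Enqueue fires when the enq handshake completes and the incoming uop's br_mask
  // does not hit this cycle's mispredict mask; _do_enq_T_6 (computed earlier in this
  // module, e.g. a flush-related term) supplies an additional kill condition.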
wire _do_enq_T = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire [11:0] _do_enq_T_1 = io_brupdate_b1_mispredict_mask_0 & io_enq_bits_uop_br_mask_0; // @[util.scala:126:51, :458:7]
wire _do_enq_T_2 = |_do_enq_T_1; // @[util.scala:126:{51,59}]
wire _do_enq_T_3 = _do_enq_T_2; // @[util.scala:61:61, :126:59]
wire _do_enq_T_4 = ~_do_enq_T_3; // @[util.scala:61:61, :514:42]
wire _do_enq_T_5 = _do_enq_T & _do_enq_T_4; // @[Decoupled.scala:51:35]
wire _do_enq_T_7 = ~_do_enq_T_6; // @[util.scala:514:{102,113}]
wire _do_enq_T_8 = _do_enq_T_5 & _do_enq_T_7; // @[util.scala:514:{39,99,102}]
wire do_enq = _do_enq_T_8; // @[util.scala:514:{26,99}]
wire [7:0] _GEN_0 = {{valids_7}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}}; // @[util.scala:504:26, :515:44]
wire _GEN_1 = _GEN_0[deq_ptr_value]; // @[Counter.scala:61:40]
wire _do_deq_T = ~_GEN_1; // @[util.scala:515:44]
wire _do_deq_T_1 = io_deq_ready_0 | _do_deq_T; // @[util.scala:458:7, :515:{41,44}]
wire _do_deq_T_2 = ~io_empty; // @[util.scala:458:7, :515:71]
wire _do_deq_T_3 = _do_deq_T_1 & _do_deq_T_2; // @[util.scala:515:{41,68,71}]
wire do_deq = _do_deq_T_3; // @[util.scala:515:{26,68}]
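  // Per-entry branch handling: each valid bit is cleared when that entry's stored
  // br_mask intersects the mispredict mask (plus the extra clear term _valids_*_T_5
  // computed earlier), and resolved branches are masked out of each stored br_mask.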
wire [11:0] _valids_0_T = io_brupdate_b1_mispredict_mask_0 & uops_0_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_0_T_1 = |_valids_0_T; // @[util.scala:126:{51,59}]
wire _valids_0_T_2 = _valids_0_T_1; // @[util.scala:61:61, :126:59]
wire _valids_0_T_3 = ~_valids_0_T_2; // @[util.scala:61:61, :520:34]
wire _valids_0_T_4 = valids_0 & _valids_0_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_0_T_6 = ~_valids_0_T_5; // @[util.scala:520:{83,94}]
wire _valids_0_T_7 = _valids_0_T_4 & _valids_0_T_6; // @[util.scala:520:{31,80,83}]
wire [11:0] _uops_0_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [11:0] _uops_0_br_mask_T_1 = uops_0_br_mask & _uops_0_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [11:0] _valids_1_T = io_brupdate_b1_mispredict_mask_0 & uops_1_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_1_T_1 = |_valids_1_T; // @[util.scala:126:{51,59}]
wire _valids_1_T_2 = _valids_1_T_1; // @[util.scala:61:61, :126:59]
wire _valids_1_T_3 = ~_valids_1_T_2; // @[util.scala:61:61, :520:34]
wire _valids_1_T_4 = valids_1 & _valids_1_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_1_T_6 = ~_valids_1_T_5; // @[util.scala:520:{83,94}]
wire _valids_1_T_7 = _valids_1_T_4 & _valids_1_T_6; // @[util.scala:520:{31,80,83}]
wire [11:0] _uops_1_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [11:0] _uops_1_br_mask_T_1 = uops_1_br_mask & _uops_1_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [11:0] _valids_2_T = io_brupdate_b1_mispredict_mask_0 & uops_2_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_2_T_1 = |_valids_2_T; // @[util.scala:126:{51,59}]
wire _valids_2_T_2 = _valids_2_T_1; // @[util.scala:61:61, :126:59]
wire _valids_2_T_3 = ~_valids_2_T_2; // @[util.scala:61:61, :520:34]
wire _valids_2_T_4 = valids_2 & _valids_2_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_2_T_6 = ~_valids_2_T_5; // @[util.scala:520:{83,94}]
wire _valids_2_T_7 = _valids_2_T_4 & _valids_2_T_6; // @[util.scala:520:{31,80,83}]
wire [11:0] _uops_2_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [11:0] _uops_2_br_mask_T_1 = uops_2_br_mask & _uops_2_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [11:0] _valids_3_T = io_brupdate_b1_mispredict_mask_0 & uops_3_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_3_T_1 = |_valids_3_T; // @[util.scala:126:{51,59}]
wire _valids_3_T_2 = _valids_3_T_1; // @[util.scala:61:61, :126:59]
wire _valids_3_T_3 = ~_valids_3_T_2; // @[util.scala:61:61, :520:34]
wire _valids_3_T_4 = valids_3 & _valids_3_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_3_T_6 = ~_valids_3_T_5; // @[util.scala:520:{83,94}]
wire _valids_3_T_7 = _valids_3_T_4 & _valids_3_T_6; // @[util.scala:520:{31,80,83}]
wire [11:0] _uops_3_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [11:0] _uops_3_br_mask_T_1 = uops_3_br_mask & _uops_3_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [11:0] _valids_4_T = io_brupdate_b1_mispredict_mask_0 & uops_4_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_4_T_1 = |_valids_4_T; // @[util.scala:126:{51,59}]
wire _valids_4_T_2 = _valids_4_T_1; // @[util.scala:61:61, :126:59]
wire _valids_4_T_3 = ~_valids_4_T_2; // @[util.scala:61:61, :520:34]
wire _valids_4_T_4 = valids_4 & _valids_4_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_4_T_6 = ~_valids_4_T_5; // @[util.scala:520:{83,94}]
wire _valids_4_T_7 = _valids_4_T_4 & _valids_4_T_6; // @[util.scala:520:{31,80,83}]
wire [11:0] _uops_4_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [11:0] _uops_4_br_mask_T_1 = uops_4_br_mask & _uops_4_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [11:0] _valids_5_T = io_brupdate_b1_mispredict_mask_0 & uops_5_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_5_T_1 = |_valids_5_T; // @[util.scala:126:{51,59}]
wire _valids_5_T_2 = _valids_5_T_1; // @[util.scala:61:61, :126:59]
wire _valids_5_T_3 = ~_valids_5_T_2; // @[util.scala:61:61, :520:34]
wire _valids_5_T_4 = valids_5 & _valids_5_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_5_T_6 = ~_valids_5_T_5; // @[util.scala:520:{83,94}]
wire _valids_5_T_7 = _valids_5_T_4 & _valids_5_T_6; // @[util.scala:520:{31,80,83}]
wire [11:0] _uops_5_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [11:0] _uops_5_br_mask_T_1 = uops_5_br_mask & _uops_5_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [11:0] _valids_6_T = io_brupdate_b1_mispredict_mask_0 & uops_6_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_6_T_1 = |_valids_6_T; // @[util.scala:126:{51,59}]
wire _valids_6_T_2 = _valids_6_T_1; // @[util.scala:61:61, :126:59]
wire _valids_6_T_3 = ~_valids_6_T_2; // @[util.scala:61:61, :520:34]
wire _valids_6_T_4 = valids_6 & _valids_6_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_6_T_6 = ~_valids_6_T_5; // @[util.scala:520:{83,94}]
wire _valids_6_T_7 = _valids_6_T_4 & _valids_6_T_6; // @[util.scala:520:{31,80,83}]
wire [11:0] _uops_6_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [11:0] _uops_6_br_mask_T_1 = uops_6_br_mask & _uops_6_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [11:0] _valids_7_T = io_brupdate_b1_mispredict_mask_0 & uops_7_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_7_T_1 = |_valids_7_T; // @[util.scala:126:{51,59}]
wire _valids_7_T_2 = _valids_7_T_1; // @[util.scala:61:61, :126:59]
wire _valids_7_T_3 = ~_valids_7_T_2; // @[util.scala:61:61, :520:34]
wire _valids_7_T_4 = valids_7 & _valids_7_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_7_T_6 = ~_valids_7_T_5; // @[util.scala:520:{83,94}]
wire _valids_7_T_7 = _valids_7_T_4 & _valids_7_T_6; // @[util.scala:520:{31,80,83}]
wire [11:0] _uops_7_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [11:0] _uops_7_br_mask_T_1 = uops_7_br_mask & _uops_7_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [11:0] _uops_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:93:27, :97:23, :458:7]
wire [11:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask_0 & _uops_br_mask_T; // @[util.scala:93:{25,27}, :458:7]
wire wrap = &enq_ptr_value; // @[Counter.scala:61:40, :73:24]
wire [3:0] _GEN_2 = {1'h0, enq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [3:0] _value_T = _GEN_2 + 4'h1; // @[Counter.scala:77:24]
wire [2:0] _value_T_1 = _value_T[2:0]; // @[Counter.scala:77:24]
wire wrap_1 = &deq_ptr_value; // @[Counter.scala:61:40, :73:24]
wire [3:0] _GEN_3 = {1'h0, deq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [3:0] _value_T_2 = _GEN_3 + 4'h1; // @[Counter.scala:77:24]
wire [2:0] _value_T_3 = _value_T_2[2:0]; // @[Counter.scala:77:24]
assign _io_enq_ready_T = ~full; // @[util.scala:513:26, :543:21]
assign io_enq_ready_0 = _io_enq_ready_T; // @[util.scala:458:7, :543:21]
assign io_deq_bits_uop_inst_0 = out_uop_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_inst_0 = out_uop_debug_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rvc_0 = out_uop_is_rvc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_pc_0 = out_uop_debug_pc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_0_0 = out_uop_iq_type_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_1_0 = out_uop_iq_type_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_2_0 = out_uop_iq_type_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_3_0 = out_uop_iq_type_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_0_0 = out_uop_fu_code_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_1_0 = out_uop_fu_code_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_2_0 = out_uop_fu_code_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_3_0 = out_uop_fu_code_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_4_0 = out_uop_fu_code_4; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_5_0 = out_uop_fu_code_5; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_6_0 = out_uop_fu_code_6; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_7_0 = out_uop_fu_code_7; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_8_0 = out_uop_fu_code_8; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_9_0 = out_uop_fu_code_9; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_0 = out_uop_iw_issued; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_agen_0 = out_uop_iw_issued_partial_agen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_dgen_0 = out_uop_iw_issued_partial_dgen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_speculative_child_0 = out_uop_iw_p1_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_speculative_child_0 = out_uop_iw_p2_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_bypass_hint_0 = out_uop_iw_p1_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_bypass_hint_0 = out_uop_iw_p2_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p3_bypass_hint_0 = out_uop_iw_p3_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dis_col_sel_0 = out_uop_dis_col_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_mask_0 = out_uop_br_mask; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_tag_0 = out_uop_br_tag; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_type_0 = out_uop_br_type; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfb_0 = out_uop_is_sfb; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fence_0 = out_uop_is_fence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fencei_0 = out_uop_is_fencei; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfence_0 = out_uop_is_sfence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_amo_0 = out_uop_is_amo; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_eret_0 = out_uop_is_eret; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sys_pc2epc_0 = out_uop_is_sys_pc2epc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rocc_0 = out_uop_is_rocc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_mov_0 = out_uop_is_mov; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ftq_idx_0 = out_uop_ftq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_edge_inst_0 = out_uop_edge_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pc_lob_0 = out_uop_pc_lob; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_taken_0 = out_uop_taken; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_rename_0 = out_uop_imm_rename; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_sel_0 = out_uop_imm_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pimm_0 = out_uop_pimm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_packed_0 = out_uop_imm_packed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op1_sel_0 = out_uop_op1_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op2_sel_0 = out_uop_op2_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ldst_0 = out_uop_fp_ctrl_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wen_0 = out_uop_fp_ctrl_wen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren1_0 = out_uop_fp_ctrl_ren1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren2_0 = out_uop_fp_ctrl_ren2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren3_0 = out_uop_fp_ctrl_ren3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap12_0 = out_uop_fp_ctrl_swap12; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap23_0 = out_uop_fp_ctrl_swap23; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagIn_0 = out_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagOut_0 = out_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fromint_0 = out_uop_fp_ctrl_fromint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_toint_0 = out_uop_fp_ctrl_toint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fastpipe_0 = out_uop_fp_ctrl_fastpipe; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fma_0 = out_uop_fp_ctrl_fma; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_div_0 = out_uop_fp_ctrl_div; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_sqrt_0 = out_uop_fp_ctrl_sqrt; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wflags_0 = out_uop_fp_ctrl_wflags; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_vec_0 = out_uop_fp_ctrl_vec; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rob_idx_0 = out_uop_rob_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldq_idx_0 = out_uop_ldq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stq_idx_0 = out_uop_stq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rxq_idx_0 = out_uop_rxq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pdst_0 = out_uop_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_0 = out_uop_prs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_0 = out_uop_prs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_0 = out_uop_prs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_0 = out_uop_ppred; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_busy_0 = out_uop_prs1_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_busy_0 = out_uop_prs2_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_busy_0 = out_uop_prs3_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_busy_0 = out_uop_ppred_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stale_pdst_0 = out_uop_stale_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exception_0 = out_uop_exception; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exc_cause_0 = out_uop_exc_cause; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_cmd_0 = out_uop_mem_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_size_0 = out_uop_mem_size; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_signed_0 = out_uop_mem_signed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_ldq_0 = out_uop_uses_ldq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_stq_0 = out_uop_uses_stq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_unique_0 = out_uop_is_unique; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_flush_on_commit_0 = out_uop_flush_on_commit; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_csr_cmd_0 = out_uop_csr_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_is_rs1_0 = out_uop_ldst_is_rs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_0 = out_uop_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_0 = out_uop_lrs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_0 = out_uop_lrs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs3_0 = out_uop_lrs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dst_rtype_0 = out_uop_dst_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_rtype_0 = out_uop_lrs1_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_rtype_0 = out_uop_lrs2_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_frs3_en_0 = out_uop_frs3_en; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_dw_0 = out_uop_fcn_dw; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_op_0 = out_uop_fcn_op; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_val_0 = out_uop_fp_val; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_rm_0 = out_uop_fp_rm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_typ_0 = out_uop_fp_typ; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_pf_if_0 = out_uop_xcpt_pf_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ae_if_0 = out_uop_xcpt_ae_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ma_if_0 = out_uop_xcpt_ma_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_debug_if_0 = out_uop_bp_debug_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_xcpt_if_0 = out_uop_bp_xcpt_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_fsrc_0 = out_uop_debug_fsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_tsrc_0 = out_uop_debug_tsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_data_0 = out_data; // @[util.scala:458:7, :545:19]
assign io_deq_bits_predicated_0 = out_predicated; // @[util.scala:458:7, :545:19]
assign io_deq_bits_fflags_valid_0 = out_fflags_valid; // @[util.scala:458:7, :545:19]
assign io_deq_bits_fflags_bits_0 = out_fflags_bits; // @[util.scala:458:7, :545:19]
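  // Read-side muxes: each _GEN_* array below packs the per-entry registers so that
  // deq_ptr_value selects the fields of the entry currently at the head of the queue.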
wire [7:0][31:0] _GEN_4 = {{uops_7_inst}, {uops_6_inst}, {uops_5_inst}, {uops_4_inst}, {uops_3_inst}, {uops_2_inst}, {uops_1_inst}, {uops_0_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_inst = _GEN_4[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][31:0] _GEN_5 = {{uops_7_debug_inst}, {uops_6_debug_inst}, {uops_5_debug_inst}, {uops_4_debug_inst}, {uops_3_debug_inst}, {uops_2_debug_inst}, {uops_1_debug_inst}, {uops_0_debug_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_inst = _GEN_5[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_6 = {{uops_7_is_rvc}, {uops_6_is_rvc}, {uops_5_is_rvc}, {uops_4_is_rvc}, {uops_3_is_rvc}, {uops_2_is_rvc}, {uops_1_is_rvc}, {uops_0_is_rvc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rvc = _GEN_6[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][39:0] _GEN_7 = {{uops_7_debug_pc}, {uops_6_debug_pc}, {uops_5_debug_pc}, {uops_4_debug_pc}, {uops_3_debug_pc}, {uops_2_debug_pc}, {uops_1_debug_pc}, {uops_0_debug_pc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_pc = _GEN_7[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_8 = {{uops_7_iq_type_0}, {uops_6_iq_type_0}, {uops_5_iq_type_0}, {uops_4_iq_type_0}, {uops_3_iq_type_0}, {uops_2_iq_type_0}, {uops_1_iq_type_0}, {uops_0_iq_type_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_0 = _GEN_8[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_9 = {{uops_7_iq_type_1}, {uops_6_iq_type_1}, {uops_5_iq_type_1}, {uops_4_iq_type_1}, {uops_3_iq_type_1}, {uops_2_iq_type_1}, {uops_1_iq_type_1}, {uops_0_iq_type_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_1 = _GEN_9[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_10 = {{uops_7_iq_type_2}, {uops_6_iq_type_2}, {uops_5_iq_type_2}, {uops_4_iq_type_2}, {uops_3_iq_type_2}, {uops_2_iq_type_2}, {uops_1_iq_type_2}, {uops_0_iq_type_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_2 = _GEN_10[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_11 = {{uops_7_iq_type_3}, {uops_6_iq_type_3}, {uops_5_iq_type_3}, {uops_4_iq_type_3}, {uops_3_iq_type_3}, {uops_2_iq_type_3}, {uops_1_iq_type_3}, {uops_0_iq_type_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_3 = _GEN_11[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_12 = {{uops_7_fu_code_0}, {uops_6_fu_code_0}, {uops_5_fu_code_0}, {uops_4_fu_code_0}, {uops_3_fu_code_0}, {uops_2_fu_code_0}, {uops_1_fu_code_0}, {uops_0_fu_code_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_0 = _GEN_12[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_13 = {{uops_7_fu_code_1}, {uops_6_fu_code_1}, {uops_5_fu_code_1}, {uops_4_fu_code_1}, {uops_3_fu_code_1}, {uops_2_fu_code_1}, {uops_1_fu_code_1}, {uops_0_fu_code_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_1 = _GEN_13[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_14 = {{uops_7_fu_code_2}, {uops_6_fu_code_2}, {uops_5_fu_code_2}, {uops_4_fu_code_2}, {uops_3_fu_code_2}, {uops_2_fu_code_2}, {uops_1_fu_code_2}, {uops_0_fu_code_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_2 = _GEN_14[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_15 = {{uops_7_fu_code_3}, {uops_6_fu_code_3}, {uops_5_fu_code_3}, {uops_4_fu_code_3}, {uops_3_fu_code_3}, {uops_2_fu_code_3}, {uops_1_fu_code_3}, {uops_0_fu_code_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_3 = _GEN_15[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_16 = {{uops_7_fu_code_4}, {uops_6_fu_code_4}, {uops_5_fu_code_4}, {uops_4_fu_code_4}, {uops_3_fu_code_4}, {uops_2_fu_code_4}, {uops_1_fu_code_4}, {uops_0_fu_code_4}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_4 = _GEN_16[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_17 = {{uops_7_fu_code_5}, {uops_6_fu_code_5}, {uops_5_fu_code_5}, {uops_4_fu_code_5}, {uops_3_fu_code_5}, {uops_2_fu_code_5}, {uops_1_fu_code_5}, {uops_0_fu_code_5}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_5 = _GEN_17[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_18 = {{uops_7_fu_code_6}, {uops_6_fu_code_6}, {uops_5_fu_code_6}, {uops_4_fu_code_6}, {uops_3_fu_code_6}, {uops_2_fu_code_6}, {uops_1_fu_code_6}, {uops_0_fu_code_6}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_6 = _GEN_18[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_19 = {{uops_7_fu_code_7}, {uops_6_fu_code_7}, {uops_5_fu_code_7}, {uops_4_fu_code_7}, {uops_3_fu_code_7}, {uops_2_fu_code_7}, {uops_1_fu_code_7}, {uops_0_fu_code_7}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_7 = _GEN_19[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_20 = {{uops_7_fu_code_8}, {uops_6_fu_code_8}, {uops_5_fu_code_8}, {uops_4_fu_code_8}, {uops_3_fu_code_8}, {uops_2_fu_code_8}, {uops_1_fu_code_8}, {uops_0_fu_code_8}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_8 = _GEN_20[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_21 = {{uops_7_fu_code_9}, {uops_6_fu_code_9}, {uops_5_fu_code_9}, {uops_4_fu_code_9}, {uops_3_fu_code_9}, {uops_2_fu_code_9}, {uops_1_fu_code_9}, {uops_0_fu_code_9}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_9 = _GEN_21[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_22 = {{uops_7_iw_issued}, {uops_6_iw_issued}, {uops_5_iw_issued}, {uops_4_iw_issued}, {uops_3_iw_issued}, {uops_2_iw_issued}, {uops_1_iw_issued}, {uops_0_iw_issued}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued = _GEN_22[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_23 = {{uops_7_iw_issued_partial_agen}, {uops_6_iw_issued_partial_agen}, {uops_5_iw_issued_partial_agen}, {uops_4_iw_issued_partial_agen}, {uops_3_iw_issued_partial_agen}, {uops_2_iw_issued_partial_agen}, {uops_1_iw_issued_partial_agen}, {uops_0_iw_issued_partial_agen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_agen = _GEN_23[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_24 = {{uops_7_iw_issued_partial_dgen}, {uops_6_iw_issued_partial_dgen}, {uops_5_iw_issued_partial_dgen}, {uops_4_iw_issued_partial_dgen}, {uops_3_iw_issued_partial_dgen}, {uops_2_iw_issued_partial_dgen}, {uops_1_iw_issued_partial_dgen}, {uops_0_iw_issued_partial_dgen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_dgen = _GEN_24[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_25 = {{uops_7_iw_p1_speculative_child}, {uops_6_iw_p1_speculative_child}, {uops_5_iw_p1_speculative_child}, {uops_4_iw_p1_speculative_child}, {uops_3_iw_p1_speculative_child}, {uops_2_iw_p1_speculative_child}, {uops_1_iw_p1_speculative_child}, {uops_0_iw_p1_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_speculative_child = _GEN_25[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_26 = {{uops_7_iw_p2_speculative_child}, {uops_6_iw_p2_speculative_child}, {uops_5_iw_p2_speculative_child}, {uops_4_iw_p2_speculative_child}, {uops_3_iw_p2_speculative_child}, {uops_2_iw_p2_speculative_child}, {uops_1_iw_p2_speculative_child}, {uops_0_iw_p2_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_speculative_child = _GEN_26[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_27 = {{uops_7_iw_p1_bypass_hint}, {uops_6_iw_p1_bypass_hint}, {uops_5_iw_p1_bypass_hint}, {uops_4_iw_p1_bypass_hint}, {uops_3_iw_p1_bypass_hint}, {uops_2_iw_p1_bypass_hint}, {uops_1_iw_p1_bypass_hint}, {uops_0_iw_p1_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_bypass_hint = _GEN_27[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_28 = {{uops_7_iw_p2_bypass_hint}, {uops_6_iw_p2_bypass_hint}, {uops_5_iw_p2_bypass_hint}, {uops_4_iw_p2_bypass_hint}, {uops_3_iw_p2_bypass_hint}, {uops_2_iw_p2_bypass_hint}, {uops_1_iw_p2_bypass_hint}, {uops_0_iw_p2_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_bypass_hint = _GEN_28[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_29 = {{uops_7_iw_p3_bypass_hint}, {uops_6_iw_p3_bypass_hint}, {uops_5_iw_p3_bypass_hint}, {uops_4_iw_p3_bypass_hint}, {uops_3_iw_p3_bypass_hint}, {uops_2_iw_p3_bypass_hint}, {uops_1_iw_p3_bypass_hint}, {uops_0_iw_p3_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p3_bypass_hint = _GEN_29[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_30 = {{uops_7_dis_col_sel}, {uops_6_dis_col_sel}, {uops_5_dis_col_sel}, {uops_4_dis_col_sel}, {uops_3_dis_col_sel}, {uops_2_dis_col_sel}, {uops_1_dis_col_sel}, {uops_0_dis_col_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_dis_col_sel = _GEN_30[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][11:0] _GEN_31 = {{uops_7_br_mask}, {uops_6_br_mask}, {uops_5_br_mask}, {uops_4_br_mask}, {uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_mask = _GEN_31[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][3:0] _GEN_32 = {{uops_7_br_tag}, {uops_6_br_tag}, {uops_5_br_tag}, {uops_4_br_tag}, {uops_3_br_tag}, {uops_2_br_tag}, {uops_1_br_tag}, {uops_0_br_tag}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_tag = _GEN_32[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][3:0] _GEN_33 = {{uops_7_br_type}, {uops_6_br_type}, {uops_5_br_type}, {uops_4_br_type}, {uops_3_br_type}, {uops_2_br_type}, {uops_1_br_type}, {uops_0_br_type}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_type = _GEN_33[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_34 = {{uops_7_is_sfb}, {uops_6_is_sfb}, {uops_5_is_sfb}, {uops_4_is_sfb}, {uops_3_is_sfb}, {uops_2_is_sfb}, {uops_1_is_sfb}, {uops_0_is_sfb}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfb = _GEN_34[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_35 = {{uops_7_is_fence}, {uops_6_is_fence}, {uops_5_is_fence}, {uops_4_is_fence}, {uops_3_is_fence}, {uops_2_is_fence}, {uops_1_is_fence}, {uops_0_is_fence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fence = _GEN_35[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_36 = {{uops_7_is_fencei}, {uops_6_is_fencei}, {uops_5_is_fencei}, {uops_4_is_fencei}, {uops_3_is_fencei}, {uops_2_is_fencei}, {uops_1_is_fencei}, {uops_0_is_fencei}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fencei = _GEN_36[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_37 = {{uops_7_is_sfence}, {uops_6_is_sfence}, {uops_5_is_sfence}, {uops_4_is_sfence}, {uops_3_is_sfence}, {uops_2_is_sfence}, {uops_1_is_sfence}, {uops_0_is_sfence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfence = _GEN_37[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_38 = {{uops_7_is_amo}, {uops_6_is_amo}, {uops_5_is_amo}, {uops_4_is_amo}, {uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_amo = _GEN_38[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_39 = {{uops_7_is_eret}, {uops_6_is_eret}, {uops_5_is_eret}, {uops_4_is_eret}, {uops_3_is_eret}, {uops_2_is_eret}, {uops_1_is_eret}, {uops_0_is_eret}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_eret = _GEN_39[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_40 = {{uops_7_is_sys_pc2epc}, {uops_6_is_sys_pc2epc}, {uops_5_is_sys_pc2epc}, {uops_4_is_sys_pc2epc}, {uops_3_is_sys_pc2epc}, {uops_2_is_sys_pc2epc}, {uops_1_is_sys_pc2epc}, {uops_0_is_sys_pc2epc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sys_pc2epc = _GEN_40[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_41 = {{uops_7_is_rocc}, {uops_6_is_rocc}, {uops_5_is_rocc}, {uops_4_is_rocc}, {uops_3_is_rocc}, {uops_2_is_rocc}, {uops_1_is_rocc}, {uops_0_is_rocc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rocc = _GEN_41[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_42 = {{uops_7_is_mov}, {uops_6_is_mov}, {uops_5_is_mov}, {uops_4_is_mov}, {uops_3_is_mov}, {uops_2_is_mov}, {uops_1_is_mov}, {uops_0_is_mov}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_mov = _GEN_42[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_43 = {{uops_7_ftq_idx}, {uops_6_ftq_idx}, {uops_5_ftq_idx}, {uops_4_ftq_idx}, {uops_3_ftq_idx}, {uops_2_ftq_idx}, {uops_1_ftq_idx}, {uops_0_ftq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ftq_idx = _GEN_43[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_44 = {{uops_7_edge_inst}, {uops_6_edge_inst}, {uops_5_edge_inst}, {uops_4_edge_inst}, {uops_3_edge_inst}, {uops_2_edge_inst}, {uops_1_edge_inst}, {uops_0_edge_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_edge_inst = _GEN_44[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_45 = {{uops_7_pc_lob}, {uops_6_pc_lob}, {uops_5_pc_lob}, {uops_4_pc_lob}, {uops_3_pc_lob}, {uops_2_pc_lob}, {uops_1_pc_lob}, {uops_0_pc_lob}}; // @[util.scala:505:22, :547:21]
assign out_uop_pc_lob = _GEN_45[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_46 = {{uops_7_taken}, {uops_6_taken}, {uops_5_taken}, {uops_4_taken}, {uops_3_taken}, {uops_2_taken}, {uops_1_taken}, {uops_0_taken}}; // @[util.scala:505:22, :547:21]
assign out_uop_taken = _GEN_46[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_47 = {{uops_7_imm_rename}, {uops_6_imm_rename}, {uops_5_imm_rename}, {uops_4_imm_rename}, {uops_3_imm_rename}, {uops_2_imm_rename}, {uops_1_imm_rename}, {uops_0_imm_rename}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_rename = _GEN_47[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_48 = {{uops_7_imm_sel}, {uops_6_imm_sel}, {uops_5_imm_sel}, {uops_4_imm_sel}, {uops_3_imm_sel}, {uops_2_imm_sel}, {uops_1_imm_sel}, {uops_0_imm_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_sel = _GEN_48[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_49 = {{uops_7_pimm}, {uops_6_pimm}, {uops_5_pimm}, {uops_4_pimm}, {uops_3_pimm}, {uops_2_pimm}, {uops_1_pimm}, {uops_0_pimm}}; // @[util.scala:505:22, :547:21]
assign out_uop_pimm = _GEN_49[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][19:0] _GEN_50 = {{uops_7_imm_packed}, {uops_6_imm_packed}, {uops_5_imm_packed}, {uops_4_imm_packed}, {uops_3_imm_packed}, {uops_2_imm_packed}, {uops_1_imm_packed}, {uops_0_imm_packed}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_packed = _GEN_50[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_51 = {{uops_7_op1_sel}, {uops_6_op1_sel}, {uops_5_op1_sel}, {uops_4_op1_sel}, {uops_3_op1_sel}, {uops_2_op1_sel}, {uops_1_op1_sel}, {uops_0_op1_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op1_sel = _GEN_51[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_52 = {{uops_7_op2_sel}, {uops_6_op2_sel}, {uops_5_op2_sel}, {uops_4_op2_sel}, {uops_3_op2_sel}, {uops_2_op2_sel}, {uops_1_op2_sel}, {uops_0_op2_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op2_sel = _GEN_52[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_53 = {{uops_7_fp_ctrl_ldst}, {uops_6_fp_ctrl_ldst}, {uops_5_fp_ctrl_ldst}, {uops_4_fp_ctrl_ldst}, {uops_3_fp_ctrl_ldst}, {uops_2_fp_ctrl_ldst}, {uops_1_fp_ctrl_ldst}, {uops_0_fp_ctrl_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ldst = _GEN_53[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_54 = {{uops_7_fp_ctrl_wen}, {uops_6_fp_ctrl_wen}, {uops_5_fp_ctrl_wen}, {uops_4_fp_ctrl_wen}, {uops_3_fp_ctrl_wen}, {uops_2_fp_ctrl_wen}, {uops_1_fp_ctrl_wen}, {uops_0_fp_ctrl_wen}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wen = _GEN_54[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_55 = {{uops_7_fp_ctrl_ren1}, {uops_6_fp_ctrl_ren1}, {uops_5_fp_ctrl_ren1}, {uops_4_fp_ctrl_ren1}, {uops_3_fp_ctrl_ren1}, {uops_2_fp_ctrl_ren1}, {uops_1_fp_ctrl_ren1}, {uops_0_fp_ctrl_ren1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren1 = _GEN_55[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_56 = {{uops_7_fp_ctrl_ren2}, {uops_6_fp_ctrl_ren2}, {uops_5_fp_ctrl_ren2}, {uops_4_fp_ctrl_ren2}, {uops_3_fp_ctrl_ren2}, {uops_2_fp_ctrl_ren2}, {uops_1_fp_ctrl_ren2}, {uops_0_fp_ctrl_ren2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren2 = _GEN_56[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_57 = {{uops_7_fp_ctrl_ren3}, {uops_6_fp_ctrl_ren3}, {uops_5_fp_ctrl_ren3}, {uops_4_fp_ctrl_ren3}, {uops_3_fp_ctrl_ren3}, {uops_2_fp_ctrl_ren3}, {uops_1_fp_ctrl_ren3}, {uops_0_fp_ctrl_ren3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren3 = _GEN_57[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_58 = {{uops_7_fp_ctrl_swap12}, {uops_6_fp_ctrl_swap12}, {uops_5_fp_ctrl_swap12}, {uops_4_fp_ctrl_swap12}, {uops_3_fp_ctrl_swap12}, {uops_2_fp_ctrl_swap12}, {uops_1_fp_ctrl_swap12}, {uops_0_fp_ctrl_swap12}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap12 = _GEN_58[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_59 = {{uops_7_fp_ctrl_swap23}, {uops_6_fp_ctrl_swap23}, {uops_5_fp_ctrl_swap23}, {uops_4_fp_ctrl_swap23}, {uops_3_fp_ctrl_swap23}, {uops_2_fp_ctrl_swap23}, {uops_1_fp_ctrl_swap23}, {uops_0_fp_ctrl_swap23}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap23 = _GEN_59[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_60 = {{uops_7_fp_ctrl_typeTagIn}, {uops_6_fp_ctrl_typeTagIn}, {uops_5_fp_ctrl_typeTagIn}, {uops_4_fp_ctrl_typeTagIn}, {uops_3_fp_ctrl_typeTagIn}, {uops_2_fp_ctrl_typeTagIn}, {uops_1_fp_ctrl_typeTagIn}, {uops_0_fp_ctrl_typeTagIn}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagIn = _GEN_60[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_61 = {{uops_7_fp_ctrl_typeTagOut}, {uops_6_fp_ctrl_typeTagOut}, {uops_5_fp_ctrl_typeTagOut}, {uops_4_fp_ctrl_typeTagOut}, {uops_3_fp_ctrl_typeTagOut}, {uops_2_fp_ctrl_typeTagOut}, {uops_1_fp_ctrl_typeTagOut}, {uops_0_fp_ctrl_typeTagOut}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagOut = _GEN_61[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_62 = {{uops_7_fp_ctrl_fromint}, {uops_6_fp_ctrl_fromint}, {uops_5_fp_ctrl_fromint}, {uops_4_fp_ctrl_fromint}, {uops_3_fp_ctrl_fromint}, {uops_2_fp_ctrl_fromint}, {uops_1_fp_ctrl_fromint}, {uops_0_fp_ctrl_fromint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fromint = _GEN_62[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_63 = {{uops_7_fp_ctrl_toint}, {uops_6_fp_ctrl_toint}, {uops_5_fp_ctrl_toint}, {uops_4_fp_ctrl_toint}, {uops_3_fp_ctrl_toint}, {uops_2_fp_ctrl_toint}, {uops_1_fp_ctrl_toint}, {uops_0_fp_ctrl_toint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_toint = _GEN_63[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_64 = {{uops_7_fp_ctrl_fastpipe}, {uops_6_fp_ctrl_fastpipe}, {uops_5_fp_ctrl_fastpipe}, {uops_4_fp_ctrl_fastpipe}, {uops_3_fp_ctrl_fastpipe}, {uops_2_fp_ctrl_fastpipe}, {uops_1_fp_ctrl_fastpipe}, {uops_0_fp_ctrl_fastpipe}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fastpipe = _GEN_64[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_65 = {{uops_7_fp_ctrl_fma}, {uops_6_fp_ctrl_fma}, {uops_5_fp_ctrl_fma}, {uops_4_fp_ctrl_fma}, {uops_3_fp_ctrl_fma}, {uops_2_fp_ctrl_fma}, {uops_1_fp_ctrl_fma}, {uops_0_fp_ctrl_fma}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fma = _GEN_65[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_66 = {{uops_7_fp_ctrl_div}, {uops_6_fp_ctrl_div}, {uops_5_fp_ctrl_div}, {uops_4_fp_ctrl_div}, {uops_3_fp_ctrl_div}, {uops_2_fp_ctrl_div}, {uops_1_fp_ctrl_div}, {uops_0_fp_ctrl_div}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_div = _GEN_66[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_67 = {{uops_7_fp_ctrl_sqrt}, {uops_6_fp_ctrl_sqrt}, {uops_5_fp_ctrl_sqrt}, {uops_4_fp_ctrl_sqrt}, {uops_3_fp_ctrl_sqrt}, {uops_2_fp_ctrl_sqrt}, {uops_1_fp_ctrl_sqrt}, {uops_0_fp_ctrl_sqrt}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_sqrt = _GEN_67[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_68 = {{uops_7_fp_ctrl_wflags}, {uops_6_fp_ctrl_wflags}, {uops_5_fp_ctrl_wflags}, {uops_4_fp_ctrl_wflags}, {uops_3_fp_ctrl_wflags}, {uops_2_fp_ctrl_wflags}, {uops_1_fp_ctrl_wflags}, {uops_0_fp_ctrl_wflags}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wflags = _GEN_68[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_69 = {{uops_7_fp_ctrl_vec}, {uops_6_fp_ctrl_vec}, {uops_5_fp_ctrl_vec}, {uops_4_fp_ctrl_vec}, {uops_3_fp_ctrl_vec}, {uops_2_fp_ctrl_vec}, {uops_1_fp_ctrl_vec}, {uops_0_fp_ctrl_vec}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_vec = _GEN_69[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_70 = {{uops_7_rob_idx}, {uops_6_rob_idx}, {uops_5_rob_idx}, {uops_4_rob_idx}, {uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rob_idx = _GEN_70[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][3:0] _GEN_71 = {{uops_7_ldq_idx}, {uops_6_ldq_idx}, {uops_5_ldq_idx}, {uops_4_ldq_idx}, {uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldq_idx = _GEN_71[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][3:0] _GEN_72 = {{uops_7_stq_idx}, {uops_6_stq_idx}, {uops_5_stq_idx}, {uops_4_stq_idx}, {uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_stq_idx = _GEN_72[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_73 = {{uops_7_rxq_idx}, {uops_6_rxq_idx}, {uops_5_rxq_idx}, {uops_4_rxq_idx}, {uops_3_rxq_idx}, {uops_2_rxq_idx}, {uops_1_rxq_idx}, {uops_0_rxq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rxq_idx = _GEN_73[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_74 = {{uops_7_pdst}, {uops_6_pdst}, {uops_5_pdst}, {uops_4_pdst}, {uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_pdst = _GEN_74[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_75 = {{uops_7_prs1}, {uops_6_prs1}, {uops_5_prs1}, {uops_4_prs1}, {uops_3_prs1}, {uops_2_prs1}, {uops_1_prs1}, {uops_0_prs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1 = _GEN_75[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_76 = {{uops_7_prs2}, {uops_6_prs2}, {uops_5_prs2}, {uops_4_prs2}, {uops_3_prs2}, {uops_2_prs2}, {uops_1_prs2}, {uops_0_prs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2 = _GEN_76[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_77 = {{uops_7_prs3}, {uops_6_prs3}, {uops_5_prs3}, {uops_4_prs3}, {uops_3_prs3}, {uops_2_prs3}, {uops_1_prs3}, {uops_0_prs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3 = _GEN_77[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_78 = {{uops_7_ppred}, {uops_6_ppred}, {uops_5_ppred}, {uops_4_ppred}, {uops_3_ppred}, {uops_2_ppred}, {uops_1_ppred}, {uops_0_ppred}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred = _GEN_78[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_79 = {{uops_7_prs1_busy}, {uops_6_prs1_busy}, {uops_5_prs1_busy}, {uops_4_prs1_busy}, {uops_3_prs1_busy}, {uops_2_prs1_busy}, {uops_1_prs1_busy}, {uops_0_prs1_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1_busy = _GEN_79[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_80 = {{uops_7_prs2_busy}, {uops_6_prs2_busy}, {uops_5_prs2_busy}, {uops_4_prs2_busy}, {uops_3_prs2_busy}, {uops_2_prs2_busy}, {uops_1_prs2_busy}, {uops_0_prs2_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2_busy = _GEN_80[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_81 = {{uops_7_prs3_busy}, {uops_6_prs3_busy}, {uops_5_prs3_busy}, {uops_4_prs3_busy}, {uops_3_prs3_busy}, {uops_2_prs3_busy}, {uops_1_prs3_busy}, {uops_0_prs3_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3_busy = _GEN_81[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_82 = {{uops_7_ppred_busy}, {uops_6_ppred_busy}, {uops_5_ppred_busy}, {uops_4_ppred_busy}, {uops_3_ppred_busy}, {uops_2_ppred_busy}, {uops_1_ppred_busy}, {uops_0_ppred_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred_busy = _GEN_82[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_83 = {{uops_7_stale_pdst}, {uops_6_stale_pdst}, {uops_5_stale_pdst}, {uops_4_stale_pdst}, {uops_3_stale_pdst}, {uops_2_stale_pdst}, {uops_1_stale_pdst}, {uops_0_stale_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_stale_pdst = _GEN_83[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_84 = {{uops_7_exception}, {uops_6_exception}, {uops_5_exception}, {uops_4_exception}, {uops_3_exception}, {uops_2_exception}, {uops_1_exception}, {uops_0_exception}}; // @[util.scala:505:22, :547:21]
assign out_uop_exception = _GEN_84[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][63:0] _GEN_85 = {{uops_7_exc_cause}, {uops_6_exc_cause}, {uops_5_exc_cause}, {uops_4_exc_cause}, {uops_3_exc_cause}, {uops_2_exc_cause}, {uops_1_exc_cause}, {uops_0_exc_cause}}; // @[util.scala:505:22, :547:21]
assign out_uop_exc_cause = _GEN_85[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_86 = {{uops_7_mem_cmd}, {uops_6_mem_cmd}, {uops_5_mem_cmd}, {uops_4_mem_cmd}, {uops_3_mem_cmd}, {uops_2_mem_cmd}, {uops_1_mem_cmd}, {uops_0_mem_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_cmd = _GEN_86[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_87 = {{uops_7_mem_size}, {uops_6_mem_size}, {uops_5_mem_size}, {uops_4_mem_size}, {uops_3_mem_size}, {uops_2_mem_size}, {uops_1_mem_size}, {uops_0_mem_size}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_size = _GEN_87[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_88 = {{uops_7_mem_signed}, {uops_6_mem_signed}, {uops_5_mem_signed}, {uops_4_mem_signed}, {uops_3_mem_signed}, {uops_2_mem_signed}, {uops_1_mem_signed}, {uops_0_mem_signed}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_signed = _GEN_88[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_89 = {{uops_7_uses_ldq}, {uops_6_uses_ldq}, {uops_5_uses_ldq}, {uops_4_uses_ldq}, {uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_ldq = _GEN_89[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_90 = {{uops_7_uses_stq}, {uops_6_uses_stq}, {uops_5_uses_stq}, {uops_4_uses_stq}, {uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_stq = _GEN_90[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_91 = {{uops_7_is_unique}, {uops_6_is_unique}, {uops_5_is_unique}, {uops_4_is_unique}, {uops_3_is_unique}, {uops_2_is_unique}, {uops_1_is_unique}, {uops_0_is_unique}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_unique = _GEN_91[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_92 = {{uops_7_flush_on_commit}, {uops_6_flush_on_commit}, {uops_5_flush_on_commit}, {uops_4_flush_on_commit}, {uops_3_flush_on_commit}, {uops_2_flush_on_commit}, {uops_1_flush_on_commit}, {uops_0_flush_on_commit}}; // @[util.scala:505:22, :547:21]
assign out_uop_flush_on_commit = _GEN_92[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_93 = {{uops_7_csr_cmd}, {uops_6_csr_cmd}, {uops_5_csr_cmd}, {uops_4_csr_cmd}, {uops_3_csr_cmd}, {uops_2_csr_cmd}, {uops_1_csr_cmd}, {uops_0_csr_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_csr_cmd = _GEN_93[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_94 = {{uops_7_ldst_is_rs1}, {uops_6_ldst_is_rs1}, {uops_5_ldst_is_rs1}, {uops_4_ldst_is_rs1}, {uops_3_ldst_is_rs1}, {uops_2_ldst_is_rs1}, {uops_1_ldst_is_rs1}, {uops_0_ldst_is_rs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst_is_rs1 = _GEN_94[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_95 = {{uops_7_ldst}, {uops_6_ldst}, {uops_5_ldst}, {uops_4_ldst}, {uops_3_ldst}, {uops_2_ldst}, {uops_1_ldst}, {uops_0_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst = _GEN_95[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_96 = {{uops_7_lrs1}, {uops_6_lrs1}, {uops_5_lrs1}, {uops_4_lrs1}, {uops_3_lrs1}, {uops_2_lrs1}, {uops_1_lrs1}, {uops_0_lrs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1 = _GEN_96[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_97 = {{uops_7_lrs2}, {uops_6_lrs2}, {uops_5_lrs2}, {uops_4_lrs2}, {uops_3_lrs2}, {uops_2_lrs2}, {uops_1_lrs2}, {uops_0_lrs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2 = _GEN_97[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_98 = {{uops_7_lrs3}, {uops_6_lrs3}, {uops_5_lrs3}, {uops_4_lrs3}, {uops_3_lrs3}, {uops_2_lrs3}, {uops_1_lrs3}, {uops_0_lrs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs3 = _GEN_98[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_99 = {{uops_7_dst_rtype}, {uops_6_dst_rtype}, {uops_5_dst_rtype}, {uops_4_dst_rtype}, {uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_dst_rtype = _GEN_99[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_100 = {{uops_7_lrs1_rtype}, {uops_6_lrs1_rtype}, {uops_5_lrs1_rtype}, {uops_4_lrs1_rtype}, {uops_3_lrs1_rtype}, {uops_2_lrs1_rtype}, {uops_1_lrs1_rtype}, {uops_0_lrs1_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1_rtype = _GEN_100[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_101 = {{uops_7_lrs2_rtype}, {uops_6_lrs2_rtype}, {uops_5_lrs2_rtype}, {uops_4_lrs2_rtype}, {uops_3_lrs2_rtype}, {uops_2_lrs2_rtype}, {uops_1_lrs2_rtype}, {uops_0_lrs2_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2_rtype = _GEN_101[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_102 = {{uops_7_frs3_en}, {uops_6_frs3_en}, {uops_5_frs3_en}, {uops_4_frs3_en}, {uops_3_frs3_en}, {uops_2_frs3_en}, {uops_1_frs3_en}, {uops_0_frs3_en}}; // @[util.scala:505:22, :547:21]
assign out_uop_frs3_en = _GEN_102[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_103 = {{uops_7_fcn_dw}, {uops_6_fcn_dw}, {uops_5_fcn_dw}, {uops_4_fcn_dw}, {uops_3_fcn_dw}, {uops_2_fcn_dw}, {uops_1_fcn_dw}, {uops_0_fcn_dw}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_dw = _GEN_103[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_104 = {{uops_7_fcn_op}, {uops_6_fcn_op}, {uops_5_fcn_op}, {uops_4_fcn_op}, {uops_3_fcn_op}, {uops_2_fcn_op}, {uops_1_fcn_op}, {uops_0_fcn_op}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_op = _GEN_104[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_105 = {{uops_7_fp_val}, {uops_6_fp_val}, {uops_5_fp_val}, {uops_4_fp_val}, {uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_val = _GEN_105[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_106 = {{uops_7_fp_rm}, {uops_6_fp_rm}, {uops_5_fp_rm}, {uops_4_fp_rm}, {uops_3_fp_rm}, {uops_2_fp_rm}, {uops_1_fp_rm}, {uops_0_fp_rm}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_rm = _GEN_106[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_107 = {{uops_7_fp_typ}, {uops_6_fp_typ}, {uops_5_fp_typ}, {uops_4_fp_typ}, {uops_3_fp_typ}, {uops_2_fp_typ}, {uops_1_fp_typ}, {uops_0_fp_typ}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_typ = _GEN_107[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_108 = {{uops_7_xcpt_pf_if}, {uops_6_xcpt_pf_if}, {uops_5_xcpt_pf_if}, {uops_4_xcpt_pf_if}, {uops_3_xcpt_pf_if}, {uops_2_xcpt_pf_if}, {uops_1_xcpt_pf_if}, {uops_0_xcpt_pf_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_pf_if = _GEN_108[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_109 = {{uops_7_xcpt_ae_if}, {uops_6_xcpt_ae_if}, {uops_5_xcpt_ae_if}, {uops_4_xcpt_ae_if}, {uops_3_xcpt_ae_if}, {uops_2_xcpt_ae_if}, {uops_1_xcpt_ae_if}, {uops_0_xcpt_ae_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ae_if = _GEN_109[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_110 = {{uops_7_xcpt_ma_if}, {uops_6_xcpt_ma_if}, {uops_5_xcpt_ma_if}, {uops_4_xcpt_ma_if}, {uops_3_xcpt_ma_if}, {uops_2_xcpt_ma_if}, {uops_1_xcpt_ma_if}, {uops_0_xcpt_ma_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ma_if = _GEN_110[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_111 = {{uops_7_bp_debug_if}, {uops_6_bp_debug_if}, {uops_5_bp_debug_if}, {uops_4_bp_debug_if}, {uops_3_bp_debug_if}, {uops_2_bp_debug_if}, {uops_1_bp_debug_if}, {uops_0_bp_debug_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_debug_if = _GEN_111[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_112 = {{uops_7_bp_xcpt_if}, {uops_6_bp_xcpt_if}, {uops_5_bp_xcpt_if}, {uops_4_bp_xcpt_if}, {uops_3_bp_xcpt_if}, {uops_2_bp_xcpt_if}, {uops_1_bp_xcpt_if}, {uops_0_bp_xcpt_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_xcpt_if = _GEN_112[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_113 = {{uops_7_debug_fsrc}, {uops_6_debug_fsrc}, {uops_5_debug_fsrc}, {uops_4_debug_fsrc}, {uops_3_debug_fsrc}, {uops_2_debug_fsrc}, {uops_1_debug_fsrc}, {uops_0_debug_fsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_fsrc = _GEN_113[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_114 = {{uops_7_debug_tsrc}, {uops_6_debug_tsrc}, {uops_5_debug_tsrc}, {uops_4_debug_tsrc}, {uops_3_debug_tsrc}, {uops_2_debug_tsrc}, {uops_1_debug_tsrc}, {uops_0_debug_tsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_tsrc = _GEN_114[deq_ptr_value]; // @[Counter.scala:61:40]
wire _io_deq_valid_T = ~io_empty; // @[util.scala:458:7, :515:71, :548:32]
assign _io_deq_valid_T_1 = _io_deq_valid_T & _GEN_1; // @[util.scala:515:44, :548:{32,42}]
assign io_deq_valid_0 = _io_deq_valid_T_1; // @[util.scala:458:7, :548:42]
wire [3:0] _ptr_diff_T = _GEN_2 - _GEN_3; // @[Counter.scala:77:24]
wire [2:0] ptr_diff = _ptr_diff_T[2:0]; // @[util.scala:551:34]
wire [3:0] _io_count_T_1 = {_io_count_T, ptr_diff}; // @[util.scala:551:34, :553:{22,34}]
assign io_count_0 = _io_count_T_1[2:0]; // @[util.scala:458:7, :553:{16,22}]
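  // One-hot write-enable decode: _GEN_116 through _GEN_129 strobe the entry addressed
  // by the enqueue pointer whenever do_enq is asserted.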
wire _GEN_115 = enq_ptr_value == 3'h0; // @[Counter.scala:61:40]
wire _GEN_116 = do_enq & _GEN_115; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_117 = enq_ptr_value == 3'h1; // @[Counter.scala:61:40]
wire _GEN_118 = do_enq & _GEN_117; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_119 = enq_ptr_value == 3'h2; // @[Counter.scala:61:40]
wire _GEN_120 = do_enq & _GEN_119; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_121 = enq_ptr_value == 3'h3; // @[Counter.scala:61:40]
wire _GEN_122 = do_enq & _GEN_121; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_123 = enq_ptr_value == 3'h4; // @[Counter.scala:61:40]
wire _GEN_124 = do_enq & _GEN_123; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_125 = enq_ptr_value == 3'h5; // @[Counter.scala:61:40]
wire _GEN_126 = do_enq & _GEN_125; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_127 = enq_ptr_value == 3'h6; // @[Counter.scala:61:40]
wire _GEN_128 = do_enq & _GEN_127; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_129 = do_enq & (&enq_ptr_value); // @[Counter.scala:61:40]
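  // Sequential state: reset clears the valid bits and pointers; otherwise each valid
  // is set on enqueue to its slot, cleared on dequeue from that slot or on branch
  // kill, the pointers advance on their respective fires, and maybe_full records
  // whether the most recent pointer movement was an enqueue.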
always @(posedge clock) begin // @[util.scala:458:7]
if (reset) begin // @[util.scala:458:7]
valids_0 <= 1'h0; // @[util.scala:504:26]
valids_1 <= 1'h0; // @[util.scala:504:26]
valids_2 <= 1'h0; // @[util.scala:504:26]
valids_3 <= 1'h0; // @[util.scala:504:26]
valids_4 <= 1'h0; // @[util.scala:504:26]
valids_5 <= 1'h0; // @[util.scala:504:26]
valids_6 <= 1'h0; // @[util.scala:504:26]
valids_7 <= 1'h0; // @[util.scala:504:26]
enq_ptr_value <= 3'h0; // @[Counter.scala:61:40]
deq_ptr_value <= 3'h0; // @[Counter.scala:61:40]
maybe_full <= 1'h0; // @[util.scala:509:29]
end
else begin // @[util.scala:458:7]
valids_0 <= ~(do_deq & deq_ptr_value == 3'h0) & (_GEN_116 | _valids_0_T_7); // @[Counter.scala:61:40]
valids_1 <= ~(do_deq & deq_ptr_value == 3'h1) & (_GEN_118 | _valids_1_T_7); // @[Counter.scala:61:40]
valids_2 <= ~(do_deq & deq_ptr_value == 3'h2) & (_GEN_120 | _valids_2_T_7); // @[Counter.scala:61:40]
valids_3 <= ~(do_deq & deq_ptr_value == 3'h3) & (_GEN_122 | _valids_3_T_7); // @[Counter.scala:61:40]
valids_4 <= ~(do_deq & deq_ptr_value == 3'h4) & (_GEN_124 | _valids_4_T_7); // @[Counter.scala:61:40]
valids_5 <= ~(do_deq & deq_ptr_value == 3'h5) & (_GEN_126 | _valids_5_T_7); // @[Counter.scala:61:40]
valids_6 <= ~(do_deq & deq_ptr_value == 3'h6) & (_GEN_128 | _valids_6_T_7); // @[Counter.scala:61:40]
valids_7 <= ~(do_deq & (&deq_ptr_value)) & (_GEN_129 | _valids_7_T_7); // @[Counter.scala:61:40]
if (do_enq) // @[util.scala:514:26]
enq_ptr_value <= _value_T_1; // @[Counter.scala:61:40, :77:24]
if (do_deq) // @[util.scala:515:26]
deq_ptr_value <= _value_T_3; // @[Counter.scala:61:40, :77:24]
if (~(do_enq == do_deq)) // @[util.scala:509:29, :514:26, :515:26, :539:{18,30}, :540:18]
maybe_full <= do_enq; // @[util.scala:509:29, :514:26]
end
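    // Per-slot payload capture: when the enqueue enable for slot N is asserted,
    // latch the incoming io_enq_bits_uop_* fields into uops_N. The same pattern
    // repeats below for slots 0 through 7.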
if (_GEN_116) begin // @[util.scala:520:18, :526:19, :528:35]
uops_0_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_0_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_0_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_0_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_0_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_0_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_0_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_0_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_0_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_0_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_0_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_0_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_0_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_0_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_0_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_0_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_0_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_0_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_0_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_0_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_0_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_0_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_0_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_0_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_0_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_0_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_0_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_0_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_0_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_0_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_0_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_0_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_0_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_0_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_0_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_0_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_0_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_0_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_0_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_0_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_0_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
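    // br_mask is written separately from the other uop fields: an enqueue loads the
    // masked enqueue value (_uops_br_mask_T_1), while a resident valid entry keeps its
    // stored mask refreshed from _uops_0_br_mask_T_1 (util.scala:97, presumably the
    // branch-resolution mask update).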
if (do_enq & _GEN_115) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_0_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_0) // @[util.scala:504:26]
uops_0_br_mask <= _uops_0_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_118) begin // @[util.scala:520:18, :526:19, :528:35]
uops_1_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_1_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_1_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_1_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_1_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_1_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_1_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_1_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_1_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_1_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_1_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_1_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_1_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_1_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_1_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_1_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_1_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_1_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_1_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_1_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_1_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_1_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_1_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_1_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_1_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_1_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_1_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_1_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_1_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_1_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_1_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_1_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_1_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_1_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_1_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_1_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_1_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_1_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_1_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_1_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_1_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_117) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_1_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_1) // @[util.scala:504:26]
uops_1_br_mask <= _uops_1_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_120) begin // @[util.scala:520:18, :526:19, :528:35]
uops_2_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_2_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_2_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_2_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_2_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_2_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_2_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_2_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_2_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_2_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_2_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_2_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_2_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_2_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_2_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_2_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_2_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_2_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_2_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_2_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_2_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_2_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_2_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_2_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_2_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_2_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_2_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_2_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_2_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_2_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_2_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_2_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_2_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_2_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_2_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_2_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_2_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_2_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_2_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_2_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_2_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_119) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_2_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_2) // @[util.scala:504:26]
uops_2_br_mask <= _uops_2_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_122) begin // @[util.scala:520:18, :526:19, :528:35]
uops_3_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_3_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_3_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_3_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_3_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_3_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_3_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_3_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_3_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_3_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_3_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_3_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_3_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_3_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_3_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_3_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_3_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_3_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_3_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_3_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_3_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_3_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_3_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_3_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_3_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_3_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_3_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_3_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_3_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_3_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_3_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_3_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_3_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_3_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_3_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_3_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_3_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_3_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_3_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_3_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_3_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_121) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_3_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_3) // @[util.scala:504:26]
uops_3_br_mask <= _uops_3_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_124) begin // @[util.scala:520:18, :526:19, :528:35]
uops_4_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_4_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_4_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_4_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_4_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_4_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_4_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_4_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_4_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_4_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_4_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_4_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_4_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_4_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_4_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_4_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_4_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_4_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_4_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_4_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_4_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_4_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_4_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_4_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_4_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_4_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_4_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_4_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_4_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_4_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_4_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_4_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_4_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_4_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_4_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_4_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_4_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_4_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_4_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_4_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_4_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_4_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_4_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_4_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_123) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_4_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_4) // @[util.scala:504:26]
uops_4_br_mask <= _uops_4_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_126) begin // @[util.scala:520:18, :526:19, :528:35]
uops_5_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_5_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_5_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_5_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_5_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_5_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_5_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_5_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_5_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_5_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_5_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_5_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_5_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_5_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_5_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_5_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_5_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_5_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_5_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_5_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_5_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_5_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_5_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_5_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_5_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_5_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_5_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_5_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_5_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_5_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_5_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_5_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_5_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_5_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_5_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_5_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_5_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_5_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_5_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_5_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_5_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_5_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_5_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_5_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_125) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_5_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_5) // @[util.scala:504:26]
uops_5_br_mask <= _uops_5_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_128) begin // @[util.scala:520:18, :526:19, :528:35]
uops_6_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_6_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_6_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_6_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_6_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_6_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_6_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_6_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_6_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_6_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_6_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_6_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_6_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_6_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_6_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_6_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_6_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_6_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_6_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_6_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_6_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_6_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_6_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_6_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_6_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_6_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_6_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_6_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_6_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_6_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_6_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_6_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_6_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_6_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_6_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_6_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_6_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_6_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_6_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_6_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_6_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_6_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_6_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_6_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_127) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_6_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_6) // @[util.scala:504:26]
uops_6_br_mask <= _uops_6_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_129) begin // @[util.scala:520:18, :526:19, :528:35]
uops_7_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_7_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_7_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_7_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_7_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_7_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_7_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_7_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_7_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_7_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_7_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_7_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_7_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_7_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_7_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_7_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_7_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_7_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_7_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_7_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_7_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_7_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_7_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_7_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_7_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_7_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_7_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_7_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_7_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_7_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_7_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_7_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_7_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_7_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_7_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_7_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_7_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_7_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_7_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_7_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_7_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_7_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_7_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_7_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & (&enq_ptr_value)) // @[Counter.scala:61:40]
uops_7_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_7) // @[util.scala:504:26]
uops_7_br_mask <= _uops_7_br_mask_T_1; // @[util.scala:97:21, :505:22]
  end // always @(posedge)
ram_8x72 ram_ext ( // @[util.scala:503:22]
.R0_addr (deq_ptr_value), // @[Counter.scala:61:40]
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_ram_ext_R0_data),
.W0_addr (enq_ptr_value), // @[Counter.scala:61:40]
.W0_en (do_enq), // @[util.scala:514:26]
.W0_clk (clock),
.W0_data ({io_enq_bits_fflags_bits_0, io_enq_bits_fflags_valid_0, 1'h0, io_enq_bits_data_0}) // @[util.scala:458:7, :503:22]
); // @[util.scala:503:22]
assign io_enq_ready = io_enq_ready_0; // @[util.scala:458:7]
assign io_deq_valid = io_deq_valid_0; // @[util.scala:458:7]
assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_0 = io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_1 = io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_2 = io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_3 = io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_0 = io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_1 = io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_2 = io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_3 = io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_4 = io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_5 = io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_6 = io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_7 = io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_8 = io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_9 = io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued = io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_agen = io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_dgen = io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_speculative_child = io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_speculative_child = io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_bypass_hint = io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_bypass_hint = io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p3_bypass_hint = io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dis_col_sel = io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_type = io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfb = io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfence = io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_eret = io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rocc = io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_mov = io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_rename = io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_sel = io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pimm = io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op1_sel = io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op2_sel = io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ldst = io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wen = io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren1 = io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren2 = io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren3 = io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap12 = io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap23 = io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagIn = io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagOut = io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fromint = io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_toint = io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fastpipe = io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fma = io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_div = io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_sqrt = io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wflags = io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_vec = io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
assign io_deq_bits_uop_csr_cmd = io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_frs3_en = io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_dw = io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_op = io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_rm = io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_typ = io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:458:7]
assign io_deq_bits_predicated = io_deq_bits_predicated_0; // @[util.scala:458:7]
assign io_deq_bits_fflags_valid = io_deq_bits_fflags_valid_0; // @[util.scala:458:7]
assign io_deq_bits_fflags_bits = io_deq_bits_fflags_bits_0; // @[util.scala:458:7]
assign io_count = io_count_0; // @[util.scala:458:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
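// Hedged sketch (illustrative, not part of this file): the node types above are
// declared inside LazyModules and wired with :=, e.g. an identity node used only
// to give an edge a stable name in the generated graph; `upstream` and
// `downstream` are hypothetical modules.
//   val anchor = TLNameNode("sbus")            // a named TLIdentityNode
//   downstream.node := anchor := upstream.node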
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
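    // Worked example (added for illustration, not in the original source): with a
    // 4-byte inner port feeding an 8-byte outer port, ratio = 2, keepBits = 3 and
    // dropBits = 2. An 8-byte burst (size = 3) gives limit = UIntToOH1(3, 3) >> 2
    // = 1, so two narrow beats are accumulated before `last` fires and one wide
    // beat is emitted; a 4-byte access (size = 2) gives limit = 0 and passes
    // straight through.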
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
        case _ => require(false, "Impossible bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
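    // Worked example (illustration only): going the other way, an 8-byte inner
    // port driving a 4-byte outer port gives ratio = 2. For an 8-byte burst,
    // limit = 1 and `!last` is returned on the first narrow beat, so the caller's
    // Repeater replays the wide beat while `index` walks its two halves; a 4-byte
    // access selects the relevant half directly from the address bits in `sel`.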
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
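// Usage sketch (illustrative; `narrowMaster` and `wideSlave` are hypothetical
// nodes, not defined in this file): the argument is the beat width presented to
// the client side, so a narrow client can be attached to a wider slave with
//   wideSlave.node := TLWidthWidget(narrowBeatBytes) := narrowMaster.node
// much as the unit test below chains TLWidthWidget(second) := TLWidthWidget(first).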
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
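// Hedged sketch (illustrative names, not part of this file): the usual pairing of
// a LazyModule wrapper with its LazyModuleImp, mirroring TLWidthWidget.Impl above;
// node edges are resolved by diplomacy before the Impl body elaborates.
//   class MyAdapter(implicit p: Parameters) extends LazyModule {
//     val node = TLAdapterNode()
//     lazy val module = new Impl
//     class Impl extends LazyModuleImp(this) {
//       (node.in zip node.out).foreach { case ((in, _), (out, _)) =>
//         out.a <> in.a; in.d <> out.d       // forward the request/response channels
//       }
//     }
//   }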
File Repeater.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
}
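// Usage sketch: this mirrors how TLWidthWidget.splice (above) uses the helper;
// `repeat` is driven combinationally from the consumer side, and the returned
// stream replaces the original `in`.
//   val repeat = Wire(Bool())
//   val repeated = Repeater(in, repeat)
//   repeat := ... // assert to replay the current beat on the next cycle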
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
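// Worked example of the ordering above (tuple comparison on (serial, index) is lexicographic):
//   HalfEdge(1, 0) < HalfEdge(1, 2)   // same serial, smaller index
//   HalfEdge(1, 7) < HalfEdge(2, 0)   // smaller serial wins regardless of index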
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
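// Worked illustration of the scanLeft/zip range construction above, using made-up
// per-binding edge counts (plain Scala, safe to paste into a REPL):
//   val counts = Seq(1, 3, 2)              // e.g. a ONCE, a star resolved to 3, a query of 2
//   val sums   = counts.scanLeft(0)(_ + _) // Seq(0, 1, 4, 6)
//   sums.init.zip(sums.tail)               // Seq((0,1), (1,4), (4,6)): the [start, end) edge range per binding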
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
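// Schematic usage sketch (placeholder names, not from this file): `in`/`out` are
// only legal once the parent LazyModule.module is being elaborated, i.e. inside its
// LazyModuleImp body, where each element pairs a negotiated bundle with its edge.
//   lazy val module = new LazyModuleImp(this) {
//     node.in.zip(node.out).foreach { case ((bIn, eIn), (bOut, eOut)) =>
//       bOut <> bIn // forward traffic; eIn/eOut carry the negotiated edge parameters
//     }
//   }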
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLWidthWidget32_17( // @[WidthWidget.scala:27:9]
input clock, // @[WidthWidget.scala:27:9]
input reset, // @[WidthWidget.scala:27:9]
output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [255:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [255:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire [255:0] _repeated_repeater_io_deq_bits_data; // @[Repeater.scala:36:26]
wire auto_anon_in_a_valid_0 = auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_in_a_bits_opcode_0 = auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_in_a_bits_param_0 = auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_in_a_bits_size_0 = auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] auto_anon_in_a_bits_source_0 = auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] auto_anon_in_a_bits_address_0 = auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [31:0] auto_anon_in_a_bits_mask_0 = auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [255:0] auto_anon_in_a_bits_data_0 = auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire auto_anon_in_a_bits_corrupt_0 = auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_ready_0 = auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire auto_anon_out_a_ready_0 = auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_valid_0 = auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_out_d_bits_opcode_0 = auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] auto_anon_out_d_bits_param_0 = auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_out_d_bits_size_0 = auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] auto_anon_out_d_bits_source_0 = auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_out_d_bits_sink_0 = auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_bits_denied_0 = auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] auto_anon_out_d_bits_data_0 = auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_bits_corrupt_0 = auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire anonIn_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_a_valid = auto_anon_in_a_valid_0; // @[WidthWidget.scala:27:9]
wire [2:0] anonIn_a_bits_opcode = auto_anon_in_a_bits_opcode_0; // @[WidthWidget.scala:27:9]
wire [2:0] anonIn_a_bits_param = auto_anon_in_a_bits_param_0; // @[WidthWidget.scala:27:9]
wire [3:0] anonIn_a_bits_size = auto_anon_in_a_bits_size_0; // @[WidthWidget.scala:27:9]
wire [4:0] anonIn_a_bits_source = auto_anon_in_a_bits_source_0; // @[WidthWidget.scala:27:9]
wire [31:0] anonIn_a_bits_address = auto_anon_in_a_bits_address_0; // @[WidthWidget.scala:27:9]
wire [31:0] anonIn_a_bits_mask = auto_anon_in_a_bits_mask_0; // @[WidthWidget.scala:27:9]
wire [255:0] anonIn_a_bits_data = auto_anon_in_a_bits_data_0; // @[WidthWidget.scala:27:9]
wire anonIn_a_bits_corrupt = auto_anon_in_a_bits_corrupt_0; // @[WidthWidget.scala:27:9]
wire anonIn_d_ready = auto_anon_in_d_ready_0; // @[WidthWidget.scala:27:9]
wire anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [255:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonOut_a_ready = auto_anon_out_a_ready_0; // @[WidthWidget.scala:27:9]
wire anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire anonOut_d_ready; // @[MixedNode.scala:542:17]
wire anonOut_d_valid = auto_anon_out_d_valid_0; // @[WidthWidget.scala:27:9]
wire [2:0] anonOut_d_bits_opcode = auto_anon_out_d_bits_opcode_0; // @[WidthWidget.scala:27:9]
wire [1:0] anonOut_d_bits_param = auto_anon_out_d_bits_param_0; // @[WidthWidget.scala:27:9]
wire [3:0] anonOut_d_bits_size = auto_anon_out_d_bits_size_0; // @[WidthWidget.scala:27:9]
wire [4:0] anonOut_d_bits_source = auto_anon_out_d_bits_source_0; // @[WidthWidget.scala:27:9]
wire [2:0] anonOut_d_bits_sink = auto_anon_out_d_bits_sink_0; // @[WidthWidget.scala:27:9]
wire anonOut_d_bits_denied = auto_anon_out_d_bits_denied_0; // @[WidthWidget.scala:27:9]
wire [63:0] anonOut_d_bits_data = auto_anon_out_d_bits_data_0; // @[WidthWidget.scala:27:9]
wire anonOut_d_bits_corrupt = auto_anon_out_d_bits_corrupt_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_a_ready_0; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_in_d_bits_opcode_0; // @[WidthWidget.scala:27:9]
wire [1:0] auto_anon_in_d_bits_param_0; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_in_d_bits_size_0; // @[WidthWidget.scala:27:9]
wire [4:0] auto_anon_in_d_bits_source_0; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_in_d_bits_sink_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_bits_denied_0; // @[WidthWidget.scala:27:9]
wire [255:0] auto_anon_in_d_bits_data_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_bits_corrupt_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_valid_0; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_out_a_bits_opcode_0; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_out_a_bits_param_0; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_out_a_bits_size_0; // @[WidthWidget.scala:27:9]
wire [4:0] auto_anon_out_a_bits_source_0; // @[WidthWidget.scala:27:9]
wire [31:0] auto_anon_out_a_bits_address_0; // @[WidthWidget.scala:27:9]
wire [7:0] auto_anon_out_a_bits_mask_0; // @[WidthWidget.scala:27:9]
wire [63:0] auto_anon_out_a_bits_data_0; // @[WidthWidget.scala:27:9]
wire auto_anon_out_a_bits_corrupt_0; // @[WidthWidget.scala:27:9]
wire auto_anon_out_a_valid_0; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_ready_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_a_ready_0 = anonIn_a_ready; // @[WidthWidget.scala:27:9]
wire _anonIn_d_valid_T; // @[WidthWidget.scala:77:29]
assign auto_anon_in_d_valid_0 = anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_param_0 = anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_size_0 = anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_source_0 = anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_sink_0 = anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_denied_0 = anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [255:0] _anonIn_d_bits_data_T_3; // @[WidthWidget.scala:73:12]
assign auto_anon_in_d_bits_data_0 = anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
wire corrupt_out; // @[WidthWidget.scala:47:36]
assign auto_anon_in_d_bits_corrupt_0 = anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire cated_ready = anonOut_a_ready; // @[WidthWidget.scala:161:25]
wire cated_valid; // @[WidthWidget.scala:161:25]
assign auto_anon_out_a_valid_0 = anonOut_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] cated_bits_opcode; // @[WidthWidget.scala:161:25]
assign auto_anon_out_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] cated_bits_param; // @[WidthWidget.scala:161:25]
assign auto_anon_out_a_bits_param_0 = anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] cated_bits_size; // @[WidthWidget.scala:161:25]
assign auto_anon_out_a_bits_size_0 = anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] cated_bits_source; // @[WidthWidget.scala:161:25]
assign auto_anon_out_a_bits_source_0 = anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] cated_bits_address; // @[WidthWidget.scala:161:25]
assign auto_anon_out_a_bits_address_0 = anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_mask_0 = anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_data_0 = anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
wire cated_bits_corrupt; // @[WidthWidget.scala:161:25]
assign auto_anon_out_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire _anonOut_d_ready_T_1; // @[WidthWidget.scala:76:29]
assign auto_anon_out_d_ready_0 = anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign anonIn_d_bits_opcode = anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign anonIn_d_bits_param = anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign anonIn_d_bits_size = anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign anonIn_d_bits_source = anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign anonIn_d_bits_sink = anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign anonIn_d_bits_denied = anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] anonIn_d_bits_data_odata_0 = anonOut_d_bits_data; // @[WidthWidget.scala:65:47]
wire [63:0] anonIn_d_bits_data_odata_1 = anonOut_d_bits_data; // @[WidthWidget.scala:65:47]
wire [63:0] anonIn_d_bits_data_odata_2 = anonOut_d_bits_data; // @[WidthWidget.scala:65:47]
wire [63:0] anonIn_d_bits_data_odata_3 = anonOut_d_bits_data; // @[WidthWidget.scala:65:47]
wire _repeat_T_1; // @[WidthWidget.scala:148:7]
wire repeat_0; // @[WidthWidget.scala:159:26]
assign anonOut_a_valid = cated_valid; // @[WidthWidget.scala:161:25]
assign anonOut_a_bits_opcode = cated_bits_opcode; // @[WidthWidget.scala:161:25]
assign anonOut_a_bits_param = cated_bits_param; // @[WidthWidget.scala:161:25]
assign anonOut_a_bits_size = cated_bits_size; // @[WidthWidget.scala:161:25]
assign anonOut_a_bits_source = cated_bits_source; // @[WidthWidget.scala:161:25]
assign anonOut_a_bits_address = cated_bits_address; // @[WidthWidget.scala:161:25]
wire [255:0] _cated_bits_data_T_2; // @[WidthWidget.scala:163:39]
assign anonOut_a_bits_corrupt = cated_bits_corrupt; // @[WidthWidget.scala:161:25]
wire [31:0] cated_bits_mask; // @[WidthWidget.scala:161:25]
wire [255:0] cated_bits_data; // @[WidthWidget.scala:161:25]
wire [191:0] _cated_bits_data_T = _repeated_repeater_io_deq_bits_data[255:64]; // @[Repeater.scala:36:26]
wire [63:0] _cated_bits_data_T_1 = anonIn_a_bits_data[63:0]; // @[WidthWidget.scala:165:31]
assign _cated_bits_data_T_2 = {_cated_bits_data_T, _cated_bits_data_T_1}; // @[WidthWidget.scala:163:39, :164:37, :165:31]
assign cated_bits_data = _cated_bits_data_T_2; // @[WidthWidget.scala:161:25, :163:39]
wire _repeat_hasData_opdata_T = cated_bits_opcode[2]; // @[WidthWidget.scala:161:25]
wire repeat_hasData = ~_repeat_hasData_opdata_T; // @[Edges.scala:92:{28,37}]
wire [19:0] _repeat_limit_T = 20'h1F << cated_bits_size; // @[package.scala:243:71]
wire [4:0] _repeat_limit_T_1 = _repeat_limit_T[4:0]; // @[package.scala:243:{71,76}]
wire [4:0] _repeat_limit_T_2 = ~_repeat_limit_T_1; // @[package.scala:243:{46,76}]
wire [1:0] repeat_limit = _repeat_limit_T_2[4:3]; // @[package.scala:243:46]
reg [1:0] repeat_count; // @[WidthWidget.scala:105:26]
wire repeat_first = repeat_count == 2'h0; // @[WidthWidget.scala:105:26, :106:25]
wire _repeat_last_T = repeat_count == repeat_limit; // @[WidthWidget.scala:103:47, :105:26, :107:25]
wire _repeat_last_T_1 = ~repeat_hasData; // @[WidthWidget.scala:107:38]
wire repeat_last = _repeat_last_T | _repeat_last_T_1; // @[WidthWidget.scala:107:{25,35,38}]
wire _repeat_T = anonOut_a_ready & anonOut_a_valid; // @[Decoupled.scala:51:35]
wire [2:0] _repeat_count_T = {1'h0, repeat_count} + 3'h1; // @[WidthWidget.scala:105:26, :110:24]
wire [1:0] _repeat_count_T_1 = _repeat_count_T[1:0]; // @[WidthWidget.scala:110:24]
wire [1:0] repeat_sel = cated_bits_address[4:3]; // @[WidthWidget.scala:116:39, :161:25]
wire [1:0] repeat_index = repeat_sel | repeat_count; // @[WidthWidget.scala:105:26, :116:39, :126:24]
wire [63:0] _repeat_anonOut_a_bits_data_mux_T = cated_bits_data[63:0]; // @[WidthWidget.scala:128:55, :161:25]
wire [63:0] repeat_anonOut_a_bits_data_mux_0 = _repeat_anonOut_a_bits_data_mux_T; // @[WidthWidget.scala:128:{43,55}]
wire [63:0] _repeat_anonOut_a_bits_data_mux_T_1 = cated_bits_data[127:64]; // @[WidthWidget.scala:128:55, :161:25]
wire [63:0] repeat_anonOut_a_bits_data_mux_1 = _repeat_anonOut_a_bits_data_mux_T_1; // @[WidthWidget.scala:128:{43,55}]
wire [63:0] _repeat_anonOut_a_bits_data_mux_T_2 = cated_bits_data[191:128]; // @[WidthWidget.scala:128:55, :161:25]
wire [63:0] repeat_anonOut_a_bits_data_mux_2 = _repeat_anonOut_a_bits_data_mux_T_2; // @[WidthWidget.scala:128:{43,55}]
wire [63:0] _repeat_anonOut_a_bits_data_mux_T_3 = cated_bits_data[255:192]; // @[WidthWidget.scala:128:55, :161:25]
wire [63:0] repeat_anonOut_a_bits_data_mux_3 = _repeat_anonOut_a_bits_data_mux_T_3; // @[WidthWidget.scala:128:{43,55}]
wire [3:0][63:0] _GEN = {{repeat_anonOut_a_bits_data_mux_3}, {repeat_anonOut_a_bits_data_mux_2}, {repeat_anonOut_a_bits_data_mux_1}, {repeat_anonOut_a_bits_data_mux_0}}; // @[WidthWidget.scala:128:43, :137:30]
assign anonOut_a_bits_data = _GEN[repeat_index]; // @[WidthWidget.scala:126:24, :137:30]
wire [7:0] _repeat_anonOut_a_bits_mask_mux_T = cated_bits_mask[7:0]; // @[WidthWidget.scala:128:55, :161:25]
wire [7:0] repeat_anonOut_a_bits_mask_mux_0 = _repeat_anonOut_a_bits_mask_mux_T; // @[WidthWidget.scala:128:{43,55}]
wire [7:0] _repeat_anonOut_a_bits_mask_mux_T_1 = cated_bits_mask[15:8]; // @[WidthWidget.scala:128:55, :161:25]
wire [7:0] repeat_anonOut_a_bits_mask_mux_1 = _repeat_anonOut_a_bits_mask_mux_T_1; // @[WidthWidget.scala:128:{43,55}]
wire [7:0] _repeat_anonOut_a_bits_mask_mux_T_2 = cated_bits_mask[23:16]; // @[WidthWidget.scala:128:55, :161:25]
wire [7:0] repeat_anonOut_a_bits_mask_mux_2 = _repeat_anonOut_a_bits_mask_mux_T_2; // @[WidthWidget.scala:128:{43,55}]
wire [7:0] _repeat_anonOut_a_bits_mask_mux_T_3 = cated_bits_mask[31:24]; // @[WidthWidget.scala:128:55, :161:25]
wire [7:0] repeat_anonOut_a_bits_mask_mux_3 = _repeat_anonOut_a_bits_mask_mux_T_3; // @[WidthWidget.scala:128:{43,55}]
wire [3:0][7:0] _GEN_0 = {{repeat_anonOut_a_bits_mask_mux_3}, {repeat_anonOut_a_bits_mask_mux_2}, {repeat_anonOut_a_bits_mask_mux_1}, {repeat_anonOut_a_bits_mask_mux_0}}; // @[WidthWidget.scala:128:43, :140:53]
assign anonOut_a_bits_mask = _GEN_0[repeat_index]; // @[WidthWidget.scala:126:24, :140:53]
assign _repeat_T_1 = ~repeat_last; // @[WidthWidget.scala:107:35, :148:7]
assign repeat_0 = _repeat_T_1; // @[WidthWidget.scala:148:7, :159:26]
wire hasData = anonOut_d_bits_opcode[0]; // @[Edges.scala:106:36]
wire [19:0] _limit_T = 20'h1F << anonOut_d_bits_size; // @[package.scala:243:71]
wire [4:0] _limit_T_1 = _limit_T[4:0]; // @[package.scala:243:{71,76}]
wire [4:0] _limit_T_2 = ~_limit_T_1; // @[package.scala:243:{46,76}]
wire [1:0] limit = _limit_T_2[4:3]; // @[package.scala:243:46]
reg [1:0] count; // @[WidthWidget.scala:40:27]
wire [1:0] _enable_T = count; // @[WidthWidget.scala:40:27, :43:56]
wire first = count == 2'h0; // @[WidthWidget.scala:40:27, :41:26]
wire _last_T = count == limit; // @[WidthWidget.scala:38:47, :40:27, :42:26]
wire _last_T_1 = ~hasData; // @[WidthWidget.scala:42:39]
wire last = _last_T | _last_T_1; // @[WidthWidget.scala:42:{26,36,39}]
wire [1:0] _enable_T_1 = _enable_T & limit; // @[WidthWidget.scala:38:47, :43:{56,63}]
wire _enable_T_2 = |_enable_T_1; // @[WidthWidget.scala:43:{63,72}]
wire enable_0 = ~_enable_T_2; // @[WidthWidget.scala:43:{47,72}]
wire [1:0] _enable_T_3 = {count[1], ~(count[0])}; // @[WidthWidget.scala:40:27, :43:56]
wire [1:0] _enable_T_4 = _enable_T_3 & limit; // @[WidthWidget.scala:38:47, :43:{56,63}]
wire _enable_T_5 = |_enable_T_4; // @[WidthWidget.scala:43:{63,72}]
wire enable_1 = ~_enable_T_5; // @[WidthWidget.scala:43:{47,72}]
wire [1:0] _enable_T_6 = count ^ 2'h2; // @[WidthWidget.scala:40:27, :43:56]
wire [1:0] _enable_T_7 = _enable_T_6 & limit; // @[WidthWidget.scala:38:47, :43:{56,63}]
wire _enable_T_8 = |_enable_T_7; // @[WidthWidget.scala:43:{63,72}]
wire enable_2 = ~_enable_T_8; // @[WidthWidget.scala:43:{47,72}]
wire [1:0] _enable_T_9 = ~count; // @[WidthWidget.scala:40:27, :43:56]
wire [1:0] _enable_T_10 = _enable_T_9 & limit; // @[WidthWidget.scala:38:47, :43:{56,63}]
wire _enable_T_11 = |_enable_T_10; // @[WidthWidget.scala:43:{63,72}]
wire enable_3 = ~_enable_T_11; // @[WidthWidget.scala:43:{47,72}]
reg corrupt_reg; // @[WidthWidget.scala:45:32]
assign corrupt_out = anonOut_d_bits_corrupt | corrupt_reg; // @[WidthWidget.scala:45:32, :47:36]
assign anonIn_d_bits_corrupt = corrupt_out; // @[WidthWidget.scala:47:36]
wire _anonIn_d_bits_data_T = anonOut_d_ready & anonOut_d_valid; // @[Decoupled.scala:51:35]
wire [2:0] _count_T = {1'h0, count} + 3'h1; // @[WidthWidget.scala:40:27, :50:24]
wire [1:0] _count_T_1 = _count_T[1:0]; // @[WidthWidget.scala:50:24]
wire _anonOut_d_ready_T = ~last; // @[WidthWidget.scala:42:36, :76:32]
assign _anonOut_d_ready_T_1 = anonIn_d_ready | _anonOut_d_ready_T; // @[WidthWidget.scala:76:{29,32}]
assign anonOut_d_ready = _anonOut_d_ready_T_1; // @[WidthWidget.scala:76:29]
assign _anonIn_d_valid_T = anonOut_d_valid & last; // @[WidthWidget.scala:42:36, :77:29]
assign anonIn_d_valid = _anonIn_d_valid_T; // @[WidthWidget.scala:77:29]
reg anonIn_d_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41]
wire _anonIn_d_bits_data_masked_enable_T = ~anonIn_d_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41, :63:45]
wire anonIn_d_bits_data_masked_enable_0 = enable_0 | _anonIn_d_bits_data_masked_enable_T; // @[WidthWidget.scala:43:47, :63:{42,45}]
wire _anonIn_d_bits_data_masked_enable_T_1 = ~anonIn_d_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41, :63:45]
wire anonIn_d_bits_data_masked_enable_1 = enable_1 | _anonIn_d_bits_data_masked_enable_T_1; // @[WidthWidget.scala:43:47, :63:{42,45}]
wire _anonIn_d_bits_data_masked_enable_T_2 = ~anonIn_d_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41, :63:45]
wire anonIn_d_bits_data_masked_enable_2 = enable_2 | _anonIn_d_bits_data_masked_enable_T_2; // @[WidthWidget.scala:43:47, :63:{42,45}]
wire _anonIn_d_bits_data_masked_enable_T_3 = ~anonIn_d_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41, :63:45]
wire anonIn_d_bits_data_masked_enable_3 = enable_3 | _anonIn_d_bits_data_masked_enable_T_3; // @[WidthWidget.scala:43:47, :63:{42,45}]
reg [63:0] anonIn_d_bits_data_rdata_0; // @[WidthWidget.scala:66:24]
reg [63:0] anonIn_d_bits_data_rdata_1; // @[WidthWidget.scala:66:24]
reg [63:0] anonIn_d_bits_data_rdata_2; // @[WidthWidget.scala:66:24]
wire [63:0] anonIn_d_bits_data_mdata_0 = anonIn_d_bits_data_masked_enable_0 ? anonIn_d_bits_data_odata_0 : anonIn_d_bits_data_rdata_0; // @[WidthWidget.scala:63:42, :65:47, :66:24, :68:88]
wire [63:0] anonIn_d_bits_data_mdata_1 = anonIn_d_bits_data_masked_enable_1 ? anonIn_d_bits_data_odata_1 : anonIn_d_bits_data_rdata_1; // @[WidthWidget.scala:63:42, :65:47, :66:24, :68:88]
wire [63:0] anonIn_d_bits_data_mdata_2 = anonIn_d_bits_data_masked_enable_2 ? anonIn_d_bits_data_odata_2 : anonIn_d_bits_data_rdata_2; // @[WidthWidget.scala:63:42, :65:47, :66:24, :68:88]
wire [63:0] anonIn_d_bits_data_mdata_3 = anonIn_d_bits_data_masked_enable_3 ? anonIn_d_bits_data_odata_3 : anonOut_d_bits_data; // @[WidthWidget.scala:63:42, :65:47, :68:88]
wire _anonIn_d_bits_data_T_1 = ~last; // @[WidthWidget.scala:42:36, :69:26, :76:32]
wire _anonIn_d_bits_data_T_2 = _anonIn_d_bits_data_T & _anonIn_d_bits_data_T_1; // @[Decoupled.scala:51:35]
wire [127:0] anonIn_d_bits_data_lo = {anonIn_d_bits_data_mdata_1, anonIn_d_bits_data_mdata_0}; // @[WidthWidget.scala:68:88, :73:12]
wire [127:0] anonIn_d_bits_data_hi = {anonIn_d_bits_data_mdata_3, anonIn_d_bits_data_mdata_2}; // @[WidthWidget.scala:68:88, :73:12]
assign _anonIn_d_bits_data_T_3 = {anonIn_d_bits_data_hi, anonIn_d_bits_data_lo}; // @[WidthWidget.scala:73:12]
assign anonIn_d_bits_data = _anonIn_d_bits_data_T_3; // @[WidthWidget.scala:73:12]
always @(posedge clock) begin // @[WidthWidget.scala:27:9]
if (reset) begin // @[WidthWidget.scala:27:9]
repeat_count <= 2'h0; // @[WidthWidget.scala:105:26]
count <= 2'h0; // @[WidthWidget.scala:40:27]
corrupt_reg <= 1'h0; // @[WidthWidget.scala:45:32]
anonIn_d_bits_data_rdata_written_once <= 1'h0; // @[WidthWidget.scala:62:41]
end
else begin // @[WidthWidget.scala:27:9]
if (_repeat_T) // @[Decoupled.scala:51:35]
repeat_count <= repeat_last ? 2'h0 : _repeat_count_T_1; // @[WidthWidget.scala:105:26, :107:35, :110:{15,24}, :111:{21,29}]
if (_anonIn_d_bits_data_T) begin // @[Decoupled.scala:51:35]
count <= last ? 2'h0 : _count_T_1; // @[WidthWidget.scala:40:27, :42:36, :50:{15,24}, :52:21, :53:17]
corrupt_reg <= ~last & corrupt_out; // @[WidthWidget.scala:42:36, :45:32, :47:36, :51:21, :52:21, :54:23]
end
anonIn_d_bits_data_rdata_written_once <= _anonIn_d_bits_data_T_2 | anonIn_d_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41, :69:{23,33}, :70:30]
end
if (_anonIn_d_bits_data_T_2) begin // @[WidthWidget.scala:69:23]
anonIn_d_bits_data_rdata_0 <= anonIn_d_bits_data_mdata_0; // @[WidthWidget.scala:66:24, :68:88]
anonIn_d_bits_data_rdata_1 <= anonIn_d_bits_data_mdata_1; // @[WidthWidget.scala:66:24, :68:88]
anonIn_d_bits_data_rdata_2 <= anonIn_d_bits_data_mdata_2; // @[WidthWidget.scala:66:24, :68:88]
end
end // always @(posedge)
TLMonitor_75 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (anonIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (anonIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (anonIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (anonIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (anonIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (anonIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (anonIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (anonIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (anonIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (anonIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (anonIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (anonIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (anonIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (anonIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (anonIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (anonIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (anonIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (anonIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (anonIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (anonIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Repeater_TLBundleA_a32d256s5k3z4u_13 repeated_repeater ( // @[Repeater.scala:36:26]
.clock (clock),
.reset (reset),
.io_repeat (repeat_0), // @[WidthWidget.scala:159:26]
.io_enq_ready (anonIn_a_ready),
.io_enq_valid (anonIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (anonIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (anonIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (anonIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (anonIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (anonIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (anonIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (anonIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (anonIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (cated_ready), // @[WidthWidget.scala:161:25]
.io_deq_valid (cated_valid),
.io_deq_bits_opcode (cated_bits_opcode),
.io_deq_bits_param (cated_bits_param),
.io_deq_bits_size (cated_bits_size),
.io_deq_bits_source (cated_bits_source),
.io_deq_bits_address (cated_bits_address),
.io_deq_bits_mask (cated_bits_mask),
.io_deq_bits_data (_repeated_repeater_io_deq_bits_data),
.io_deq_bits_corrupt (cated_bits_corrupt)
); // @[Repeater.scala:36:26]
assign auto_anon_in_a_ready = auto_anon_in_a_ready_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_valid = auto_anon_in_d_valid_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_opcode = auto_anon_in_d_bits_opcode_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_param = auto_anon_in_d_bits_param_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_size = auto_anon_in_d_bits_size_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_source = auto_anon_in_d_bits_source_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_sink = auto_anon_in_d_bits_sink_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_denied = auto_anon_in_d_bits_denied_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_data = auto_anon_in_d_bits_data_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_corrupt = auto_anon_in_d_bits_corrupt_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_valid = auto_anon_out_a_valid_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_opcode = auto_anon_out_a_bits_opcode_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_param = auto_anon_out_a_bits_param_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_size = auto_anon_out_a_bits_size_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_source = auto_anon_out_a_bits_source_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_address = auto_anon_out_a_bits_address_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_mask = auto_anon_out_a_bits_mask_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_data = auto_anon_out_a_bits_data_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_corrupt = auto_anon_out_a_bits_corrupt_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_d_ready = auto_anon_out_d_ready_0; // @[WidthWidget.scala:27:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
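// Hedged usage sketch (placeholder node names, not from this file): with `enable`
// set, the helper above splices an ephemeral node with monitoring enabled in front
// of `node`, so a client connects through the returned node instead, e.g.
//   TLMonitor(enable = true, mySinkNode) := myClientNode
// With `enable` false the original node is returned and the connection is unchanged.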
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address=== address,"'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address=== address,"'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
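    // Worked example of this encoding (numbers assumed, e.g. sizeBits = 4): then
    // a_size_bus_size = 5, log_a_size_bus_size = 3, and every source ID owns an 8-bit
    // slot of inflight_sizes. A request of size 3 is stored as (3 << 1) | 1 = 7 (the
    // low bit marks the slot as occupied); the lookup below masks the slot out and
    // shifts right by 1, recovering 3, so a real size/opcode of 0 never collides with
    // an empty (all-zero) slot.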
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
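    // These vectors are indexed by the A-channel opcode. For example (per the TileLink
    // opcode encoding assumed here): Get (4) expects AccessAckData, PutFullData (0)
    // expects AccessAck, and AcquireBlock (6) may be answered with Grant or, via the
    // second option, GrantData.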
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
    * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
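  // Hypothetical usage sketch (the "max_cycles" name is illustrative, not defined here):
  //   val max_cycles = PlusArg("max_cycles", default = 0, docstring = "Stop after this many cycles")
  // Running the simulation with +max_cycles=1000 makes this wire read 1000.U; without
  // the plusarg it reads the default.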
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
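  // Assumed usage sketch (the counter name is illustrative):
  //   PlusArg.timeout("my_timeout", docstring = "Kill simulation after N cycles")(cycleCount)
  // asserts once cycleCount reaches the +my_timeout=N value; the default of 0 never fires.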
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
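  // For example, on an edge whose managers support only Get and AcquireB (no Puts or
  // atomics), staticHasData returns Some(false) for the A channel, so hasData below
  // becomes a constant and the data-beat counting logic can be optimized away.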
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
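  // Example (assuming beatBytes = 8): address 0x1234 splits into addr_hi = 0x246
  // (the beat index, address >> 3) and addr_lo = 0x4 (the byte offset within the beat).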
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
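  // Worked example (assuming beatBytes = 8): a 32-byte PutFullData has beats1 = 3, so
  // the counter sees 0 (first), then 3 -> 2 -> 1 across the remaining beats; last is
  // true when counter === 1 and done = last && fire. Messages without a data payload
  // (e.g. Get) have beats1 = 0, so first, last and done all coincide on the single beat.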
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
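  // Illustrative sketch, not part of the original edge API: the usual way the beat
  // helpers above are consumed when walking a multi-beat message. `edge` (a handle to
  // this TLEdge) and `out` (a TLBundle port) are assumed names.
  //   val (d_first, d_last, d_done) = edge.firstlast(out.d)
  //   when (d_done) { /* the full D-channel burst completed this cycle */ }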
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
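  // Illustrative sketch, not part of the original source: a client-side use of needT to
  // pick the grow-permission parameter of an Acquire. `edge` (a TLEdgeOut) and `req`
  // (a TLBundleA) are assumed names.
  //   val grow             = Mux(edge.needT(req), TLPermissions.NtoT, TLPermissions.NtoB)
  //   val (legal, acquire) = edge.AcquireBlock(req.source, req.address, req.size, grow)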
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
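  // Illustrative sketch, not part of the original source: inFlight is typically used for
  // quiescence checks. `edge`, `bundle`, and `allow_disruption` are assumed names.
  //   val (inflight, _) = edge.inFlight(bundle)
  //   assert(!allow_disruption || inflight === 0.U, "TileLink port must be drained first")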
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_44( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire a_first_done = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [6:0] address; // @[Monitor.scala:391:22]
reg d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [3:0] inflight_sizes; // @[Monitor.scala:618:33]
reg a_first_counter_1; // @[Edges.scala:229:27]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire a_set = a_first_done & ~a_first_counter_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
reg [3:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg d_first_counter_2; // @[Edges.scala:229:27]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation of one specific virtual channel in wormhole routing
  *
  * @param src the source node
  * @param dst the destination node
  * @param vc ID of the virtual channel
  * @param n_vc the number of virtual channels
  */
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
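// Illustrative only, not part of the original file: plain-Scala constructions matching
// the doc comment above. The values are arbitrary examples.
object ChannelRoutingInfoExample {
  // Virtual channel 2 of 4 on a physical channel from node 0 to node 3.
  val link = ChannelRoutingInfo(src = 0, dst = 3, vc = 2, n_vc = 4)
  require(!link.isIngress && !link.isEgress)
  // Ingress and egress terminals use src = -1 and dst = -1 respectively.
  val ingress = ChannelRoutingInfo(src = -1, dst = 3, vc = 0, n_vc = 1)
  val egress  = ChannelRoutingInfo(src = 0, dst = -1, vc = 0, n_vc = 1)
}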
/** Represents the properties of a packet that are relevant for routing.
  * ingressId and egressId uniquely identify a flow, but the virtual network and
  * destination are also carried here to simplify the implementation of routing relations.
  *
  * @param ingressId packet's source ingress point
  * @param egressId packet's destination egress point
  * @param vNetId virtual subnetwork identifier
  * @param egressNode packet's destination node ID
  */
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
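// Illustrative sketch, not part of the original file: isFlow is how an elaboration-time
// FlowRoutingInfo is matched against the FlowRoutingBundle carried by a flit. `flow`
// (a FlowRoutingInfo) and `flit` (a constellation.channel.Flit) are assumed names.
//   val hit: Bool = flow.isFlow(flit.flow)
//   when (flit.head && hit) { /* this head flit belongs to `flow` */ }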
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_59( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [4:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
reg in_flight_5; // @[Monitor.scala:16:26]
reg in_flight_6; // @[Monitor.scala:16:26]
reg in_flight_7; // @[Monitor.scala:16:26] |
Generate the Verilog code corresponding to the following Chisel files.
File Serdes.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(t))
val out = Decoupled(new Flit(flitWidth))
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
data(0) := DontCare // unused, DCE this
}
}
io.busy := io.out.valid
}
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(t)
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits := (if (dataBeats == 1) {
io.in.bits.flit.asTypeOf(t)
} else {
Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
})
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
}
}
}
io.busy := beat =/= 0.U
}
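// Worked example of the beat arithmetic above, tied to the module emitted below: for
// t.getWidth = 67 and flitWidth = 32, dataBeats = (67 - 1) / 32 + 1 = 3, so the
// deserializer buffers two 32-bit flits in `data` and concatenates the third incoming
// flit on top (Cat(io.in.bits.flit, data.asUInt)) before casting back to the 67-bit type.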
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Phit(phitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
}
}
}
object FlitToPhit {
def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
val flit2phit = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
flit2phit.io.in <> flit
flit2phit.io.out
}
}
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Decoupled(new Flit(flitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat) := io.in.bits.phit
}
}
}
}
object PhitToFlit {
def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in <> phit
phit2flit.io.out
}
def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in.valid := phit.valid
phit2flit.io.in.bits := phit.bits
when (phit.valid) { assert(phit2flit.io.in.ready) }
val out = Wire(Valid(new Flit(flitWidth)))
out.valid := phit2flit.io.out.valid
out.bits := phit2flit.io.out.bits
phit2flit.io.out.ready := true.B
out
}
}
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
val out = Decoupled(new Phit(phitWidth))
})
if (channels == 1) {
io.out <> io.in(0)
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val chosen_reg = Reg(UInt(headerWidth.W))
val chosen_prio = PriorityEncoder(io.in.map(_.valid))
val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.out.valid := VecInit(io.in.map(_.valid))(chosen)
io.out.bits.phit := Mux(beat < headerBeats.U,
chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
VecInit(io.in.map(_.bits.phit))(chosen))
for (i <- 0 until channels) {
io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
}
when (io.out.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) { chosen_reg := chosen_prio }
}
}
}
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Vec(channels, Decoupled(new Phit(phitWidth)))
})
if (channels == 1) {
io.out(0) <> io.in
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
for (c <- 0 until channels) {
io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
io.out(c).bits.phit := io.in.bits.phit
}
when (io.in.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat < headerBeats.U) {
channel_vec(header_idx) := io.in.bits.phit
}
}
}
}
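// Worked example of the framing arithmetic shared by PhitArbiter and PhitDemux
// (values chosen for illustration): for phitWidth = 4, flitWidth = 32 and channels = 5,
// headerWidth = log2Ceil(5) = 3, headerBeats = (3 - 1) / 4 + 1 = 1 and
// flitBeats = (32 - 1) / 4 + 1 = 8, so each flit is carried as one header phit
// followed by eight payload phits (beats = 9).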
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Flipped(Decoupled(new Flit(flitWidth)))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = io.out.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
}
io.out.valid := io.in.valid && credits < bufferSz.U
io.out.bits.flit := io.in.bits.flit
io.in.ready := io.out.ready && credits < bufferSz.U
io.credit.ready := true.B
}
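// Descriptive note, not in the original file: this module and CreditedFlitToDecoupledFlit
// below form a credit loop. A flit may only be sent while credits < bufferSz; each credit
// flit returned on io.credit encodes (number of freed buffer entries - 1), which is why
// the decrement above uses io.credit.bits.flit +& 1.U.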
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) }
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
io.credit.bits.flit := credits - 1.U
}
| module GenericDeserializer_TLBeatw67_f32_1( // @[Serdes.scala:37:7]
input clock, // @[Serdes.scala:37:7]
input reset, // @[Serdes.scala:37:7]
output io_in_ready, // @[Serdes.scala:39:14]
input io_in_valid, // @[Serdes.scala:39:14]
input [31:0] io_in_bits_flit, // @[Serdes.scala:39:14]
input io_out_ready, // @[Serdes.scala:39:14]
output io_out_valid, // @[Serdes.scala:39:14]
output [64:0] io_out_bits_payload, // @[Serdes.scala:39:14]
output io_out_bits_head, // @[Serdes.scala:39:14]
output io_out_bits_tail, // @[Serdes.scala:39:14]
output io_busy // @[Serdes.scala:39:14]
);
wire io_in_valid_0 = io_in_valid; // @[Serdes.scala:37:7]
wire [31:0] io_in_bits_flit_0 = io_in_bits_flit; // @[Serdes.scala:37:7]
wire io_out_ready_0 = io_out_ready; // @[Serdes.scala:37:7]
wire _io_in_ready_T_1; // @[Serdes.scala:51:31]
wire _io_out_valid_T_1; // @[Serdes.scala:52:31]
wire [64:0] _io_out_bits_WIRE_payload; // @[Serdes.scala:56:47]
wire _io_out_bits_WIRE_head; // @[Serdes.scala:56:47]
wire _io_out_bits_WIRE_tail; // @[Serdes.scala:56:47]
wire _io_busy_T; // @[Serdes.scala:68:19]
wire io_in_ready_0; // @[Serdes.scala:37:7]
wire [64:0] io_out_bits_payload_0; // @[Serdes.scala:37:7]
wire io_out_bits_head_0; // @[Serdes.scala:37:7]
wire io_out_bits_tail_0; // @[Serdes.scala:37:7]
wire io_out_valid_0; // @[Serdes.scala:37:7]
wire io_busy_0; // @[Serdes.scala:37:7]
reg [31:0] data_0; // @[Serdes.scala:48:17]
reg [31:0] data_1; // @[Serdes.scala:48:17]
reg [1:0] beat; // @[Serdes.scala:49:21]
wire _io_in_ready_T = beat != 2'h2; // @[Serdes.scala:49:21, :51:39]
assign _io_in_ready_T_1 = io_out_ready_0 | _io_in_ready_T; // @[Serdes.scala:37:7, :51:{31,39}]
assign io_in_ready_0 = _io_in_ready_T_1; // @[Serdes.scala:37:7, :51:31]
wire _GEN = beat == 2'h2; // @[Serdes.scala:49:21, :52:39]
wire _io_out_valid_T; // @[Serdes.scala:52:39]
assign _io_out_valid_T = _GEN; // @[Serdes.scala:52:39]
wire _beat_T; // @[Serdes.scala:60:22]
assign _beat_T = _GEN; // @[Serdes.scala:52:39, :60:22]
assign _io_out_valid_T_1 = io_in_valid_0 & _io_out_valid_T; // @[Serdes.scala:37:7, :52:{31,39}]
assign io_out_valid_0 = _io_out_valid_T_1; // @[Serdes.scala:37:7, :52:31]
wire [63:0] _io_out_bits_T = {data_1, data_0}; // @[Serdes.scala:48:17, :56:31]
wire [95:0] _io_out_bits_T_1 = {io_in_bits_flit_0, _io_out_bits_T}; // @[Serdes.scala:37:7, :56:{8,31}]
wire [64:0] _io_out_bits_T_4; // @[Serdes.scala:56:47]
assign io_out_bits_payload_0 = _io_out_bits_WIRE_payload; // @[Serdes.scala:37:7, :56:47]
wire _io_out_bits_T_3; // @[Serdes.scala:56:47]
assign io_out_bits_head_0 = _io_out_bits_WIRE_head; // @[Serdes.scala:37:7, :56:47]
wire _io_out_bits_T_2; // @[Serdes.scala:56:47]
assign io_out_bits_tail_0 = _io_out_bits_WIRE_tail; // @[Serdes.scala:37:7, :56:47]
wire [66:0] _io_out_bits_WIRE_1 = _io_out_bits_T_1[66:0]; // @[Serdes.scala:56:{8,47}]
assign _io_out_bits_T_2 = _io_out_bits_WIRE_1[0]; // @[Serdes.scala:56:47]
assign _io_out_bits_WIRE_tail = _io_out_bits_T_2; // @[Serdes.scala:56:47]
assign _io_out_bits_T_3 = _io_out_bits_WIRE_1[1]; // @[Serdes.scala:56:47]
assign _io_out_bits_WIRE_head = _io_out_bits_T_3; // @[Serdes.scala:56:47]
assign _io_out_bits_T_4 = _io_out_bits_WIRE_1[66:2]; // @[Serdes.scala:56:47]
assign _io_out_bits_WIRE_payload = _io_out_bits_T_4; // @[Serdes.scala:56:47]
wire [2:0] _beat_T_1 = {1'h0, beat} + 3'h1; // @[Serdes.scala:49:21, :60:53]
wire [1:0] _beat_T_2 = _beat_T_1[1:0]; // @[Serdes.scala:60:53]
wire [1:0] _beat_T_3 = _beat_T ? 2'h0 : _beat_T_2; // @[Serdes.scala:60:{16,22,53}]
assign _io_busy_T = |beat; // @[Serdes.scala:49:21, :68:19]
assign io_busy_0 = _io_busy_T; // @[Serdes.scala:37:7, :68:19]
wire _T = io_in_ready_0 & io_in_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Serdes.scala:37:7]
if (~_T | _GEN | beat[0]) begin // @[Decoupled.scala:51:35]
end
else // @[Serdes.scala:48:17, :51:39, :59:21, :62:39, :63:47]
data_0 <= io_in_bits_flit_0; // @[Serdes.scala:37:7, :48:17]
if (~_T | _GEN | ~(beat[0])) begin // @[Decoupled.scala:51:35]
end
else // @[Serdes.scala:48:17, :51:39, :59:21, :62:39, :63:47]
data_1 <= io_in_bits_flit_0; // @[Serdes.scala:37:7, :48:17]
if (reset) // @[Serdes.scala:37:7]
beat <= 2'h0; // @[Serdes.scala:49:21]
else if (_T) // @[Decoupled.scala:51:35]
beat <= _beat_T_3; // @[Serdes.scala:49:21, :60:16]
  end // always @(posedge)
assign io_in_ready = io_in_ready_0; // @[Serdes.scala:37:7]
assign io_out_valid = io_out_valid_0; // @[Serdes.scala:37:7]
assign io_out_bits_payload = io_out_bits_payload_0; // @[Serdes.scala:37:7]
assign io_out_bits_head = io_out_bits_head_0; // @[Serdes.scala:37:7]
assign io_out_bits_tail = io_out_bits_tail_0; // @[Serdes.scala:37:7]
assign io_busy = io_busy_0; // @[Serdes.scala:37:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister, but allows the user to suggest a
// name for the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
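// Illustrative only, not part of the original file: a minimal module using ShiftRegInit
// to add a named, reset-initialized two-stage pipe to a signal.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Produces registers suggested as d_pipe_0 and d_pipe_1, both reset to false.B.
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("d_pipe"))
}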
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
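// Illustrative only, not part of the original file: a minimal wrapper using the helper
// above to push a single-bit signal through a 3-deep asynchronously reset pipe.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("sync_pipe"))
}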
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
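// Illustrative only, not part of the original file: the two configurations most often
// implied by the comments above.
object AsyncQueueParamsExamples {
  // An 8-deep queue with 3 synchronizer stages and reset-safe handshaking (the defaults).
  val fifo8 = AsyncQueueParams(depth = 8, sync = 3, safe = true)
  // A single-entry crossing; narrow is pointless for depth 1, hence the helper.
  val single = AsyncQueueParams.singleton()
}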
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
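// Worked example of the binary-to-Gray conversion above (incremented ^ (incremented >> 1)):
// binary 4 = 100 maps to Gray 110 and binary 5 = 101 maps to Gray 111, so consecutive
// counter values differ in exactly one bit, which is what makes the pointers safe to
// synchronize across the clock boundary one bit at a time.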
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
    // that denotes: do not reset while there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
    // that denotes: do not reset while there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
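// Illustrative sketch, not part of the original file: the usual pairing of the two
// helpers above, with each side elaborated under its own clock and reset.
class AsyncQueueHelperExample extends RawModule {
  val enq_clock = IO(Input(Clock()))
  val enq_reset = IO(Input(Bool()))
  val deq_clock = IO(Input(Clock()))
  val deq_reset = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(UInt(8.W))))
  val deq = IO(Decoupled(UInt(8.W)))
  val crossing = withClockAndReset(enq_clock, enq_reset) { ToAsyncBundle(enq, AsyncQueueParams()) }
  deq <> withClockAndReset(deq_clock, deq_reset) { FromAsyncBundle(crossing) }
}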
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_113( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_134 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
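As a brief usage note (an illustrative sketch, not part of the original file; the wrapper name is an assumption), rawFloatFromRecFN gives a decoded RawFloat view of a recoded operand, and its fields map directly onto the rawIn_* wires in the generated module that follows:
// Illustrative sketch only; assumes chisel3 and the hardfloat package are available.
import chisel3._
import hardfloat._

class ClassifyRecF32 extends RawModule {
    val io = IO(new Bundle {
        val in     = Input(Bits(33.W))
        val isNaN  = Output(Bool())
        val isInf  = Output(Bool())
        val isZero = Output(Bool())
        val sign   = Output(Bool())
    })
    val raw = rawFloatFromRecFN(8, 24, io.in)
    io.isNaN  := raw.isNaN
    io.isInf  := raw.isInf
    io.isZero := raw.isZero
    io.sign   := raw.sign
}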
| module RecFNToRecFN_257( // @[RecFNToRecFN.scala:44:5]
input [32:0] io_in, // @[RecFNToRecFN.scala:48:16]
output [32:0] io_out // @[RecFNToRecFN.scala:48:16]
);
wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35]
wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54]
wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35]
wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}]
wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46]
assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
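// Editorial note (not part of the original file): from the signProd and doSubMags
// expressions above,
//     signProd  = rawA.sign ^ rawB.sign ^ io.op(1)
//     doSubMags = signProd  ^ rawC.sign ^ io.op(0)
// op(1) effectively negates the product and op(0) effectively negates the addend, so
// op = "b00" computes a*b + c, "b01" computes a*b - c, "b10" computes -(a*b) + c, and
// "b11" computes -(a*b) - c (the four RISC-V fused multiply-add variants).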
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
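As a usage sketch (illustrative only; the wrapper name and the choice of rounding constants are assumptions), MulAddRecFN computes a*b + c on recoded single-precision operands when op is driven to zero:
// Illustrative sketch only; assumes chisel3 and the hardfloat package are available.
import chisel3._
import hardfloat._

class FmaddRecF32 extends RawModule {
    val io = IO(new Bundle {
        val a, b, c        = Input(Bits(33.W))
        val out            = Output(Bits(33.W))
        val exceptionFlags = Output(Bits(5.W))
    })
    val fma = Module(new MulAddRecFN(8, 24))
    fma.io.op             := 0.U        // "b00": a*b + c
    fma.io.a              := io.a
    fma.io.b              := io.b
    fma.io.c              := io.c
    fma.io.roundingMode   := consts.round_near_even
    fma.io.detectTininess := consts.tininess_afterRounding
    io.out            := fma.io.out
    io.exceptionFlags := fma.io.exceptionFlags
}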
| module MulAddRecFN_e8_s24( // @[MulAddRecFN.scala:300:7]
input [32:0] io_a, // @[MulAddRecFN.scala:303:16]
input [32:0] io_b, // @[MulAddRecFN.scala:303:16]
output [32:0] io_out // @[MulAddRecFN.scala:303:16]
);
wire _mulAddRecFNToRaw_postMul_io_invalidExc; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isNaN; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isInf; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isZero; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_sign; // @[MulAddRecFN.scala:319:15]
wire [9:0] _mulAddRecFNToRaw_postMul_io_rawOut_sExp; // @[MulAddRecFN.scala:319:15]
wire [26:0] _mulAddRecFNToRaw_postMul_io_rawOut_sig; // @[MulAddRecFN.scala:319:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddA; // @[MulAddRecFN.scala:317:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddB; // @[MulAddRecFN.scala:317:15]
wire [47:0] _mulAddRecFNToRaw_preMul_io_mulAddC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_signProd; // @[MulAddRecFN.scala:317:15]
wire [9:0] _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags; // @[MulAddRecFN.scala:317:15]
wire [4:0] _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist; // @[MulAddRecFN.scala:317:15]
wire [25:0] _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC; // @[MulAddRecFN.scala:317:15]
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:300:7]
wire [32:0] io_b_0 = io_b; // @[MulAddRecFN.scala:300:7]
wire io_detectTininess = 1'h1; // @[MulAddRecFN.scala:300:7, :303:16, :317:15, :319:15, :339:15]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:300:7, :303:16, :319:15, :339:15]
wire [32:0] io_c = 33'h0; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [32:0] io_out_0; // @[MulAddRecFN.scala:300:7]
wire [4:0] io_exceptionFlags; // @[MulAddRecFN.scala:300:7]
wire [47:0] _mulAddResult_T = {24'h0, _mulAddRecFNToRaw_preMul_io_mulAddA} * {24'h0, _mulAddRecFNToRaw_preMul_io_mulAddB}; // @[MulAddRecFN.scala:317:15, :327:45]
wire [48:0] mulAddResult = {1'h0, _mulAddResult_T} + {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddC}; // @[MulAddRecFN.scala:317:15, :327:45, :328:50]
MulAddRecFNToRaw_preMul_e8_s24 mulAddRecFNToRaw_preMul ( // @[MulAddRecFN.scala:317:15]
.io_a (io_a_0), // @[MulAddRecFN.scala:300:7]
.io_b (io_b_0), // @[MulAddRecFN.scala:300:7]
.io_mulAddA (_mulAddRecFNToRaw_preMul_io_mulAddA),
.io_mulAddB (_mulAddRecFNToRaw_preMul_io_mulAddB),
.io_mulAddC (_mulAddRecFNToRaw_preMul_io_mulAddC),
.io_toPostMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny),
.io_toPostMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB),
.io_toPostMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA),
.io_toPostMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA),
.io_toPostMul_isInfB (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfB),
.io_toPostMul_isZeroB (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB),
.io_toPostMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd),
.io_toPostMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum),
.io_toPostMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags),
.io_toPostMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist),
.io_toPostMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC),
.io_toPostMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC)
); // @[MulAddRecFN.scala:317:15]
MulAddRecFNToRaw_postMul_e8_s24 mulAddRecFNToRaw_postMul ( // @[MulAddRecFN.scala:319:15]
.io_fromPreMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfB (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroB (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_mulAddResult (mulAddResult), // @[MulAddRecFN.scala:328:50]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc),
.io_rawOut_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN),
.io_rawOut_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf),
.io_rawOut_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero),
.io_rawOut_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign),
.io_rawOut_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp),
.io_rawOut_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig)
); // @[MulAddRecFN.scala:319:15]
RoundRawFNToRecFN_e8_s24 roundRawFNToRecFN ( // @[MulAddRecFN.scala:339:15]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc), // @[MulAddRecFN.scala:319:15]
.io_in_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN), // @[MulAddRecFN.scala:319:15]
.io_in_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf), // @[MulAddRecFN.scala:319:15]
.io_in_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero), // @[MulAddRecFN.scala:319:15]
.io_in_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign), // @[MulAddRecFN.scala:319:15]
.io_in_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp), // @[MulAddRecFN.scala:319:15]
.io_in_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig), // @[MulAddRecFN.scala:319:15]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags)
); // @[MulAddRecFN.scala:339:15]
assign io_out = io_out_0; // @[MulAddRecFN.scala:300:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
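// Editorial note (not part of the original file): for an e8/s24 (single-precision)
// output format the derived constants above evaluate to
//     outNaNExp        = 7 << 6        = 0x1C0
//     outInfExp        = 6 << 6        = 0x180
//     outMaxFiniteExp  = 0x180 - 1     = 0x17F
//     outMinNormExp    = (1 << 7) + 2  = 0x82
//     outMinNonzeroExp = 0x82 - 24 + 1 = 0x6B
// which is why literals such as 9'h1C0 show up in the generated Verilog below.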
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
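For reference, the 3-bit io.roundingMode encodings can be read directly off the comparisons in the generated module below: 0 is round_near_even, 1 is round_minMag, 2 is round_min, 3 is round_max, 4 is round_near_maxMag, and 6 is round_odd. The hard-wired io_detectTininess value of 1 corresponds to hardfloat's tininess_afterRounding encoding, i.e. tininess is detected after rounding.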
| module RoundAnyRawFNToRecFN_ie7_is64_oe8_os24_3( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [8:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [64:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [64:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [24:0] _roundMask_T = 25'h0; // @[RoundAnyRawFNToRecFN.scala:153:36]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [26:0] roundMask = 27'h3; // @[RoundAnyRawFNToRecFN.scala:153:55]
wire [27:0] _shiftedRoundMask_T = 28'h3; // @[RoundAnyRawFNToRecFN.scala:162:41]
wire [26:0] shiftedRoundMask = 27'h1; // @[RoundAnyRawFNToRecFN.scala:162:53]
wire [26:0] _roundPosMask_T = 27'h7FFFFFE; // @[RoundAnyRawFNToRecFN.scala:163:28]
wire [26:0] roundPosMask = 27'h2; // @[RoundAnyRawFNToRecFN.scala:163:46]
wire [26:0] _roundedSig_T_10 = 27'h7FFFFFC; // @[RoundAnyRawFNToRecFN.scala:180:32]
wire [25:0] _roundedSig_T_6 = 26'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67]
wire [25:0] _roundedSig_T_14 = 26'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14]
wire [8:0] _expOut_T_12 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18]
wire [8:0] _expOut_T_11 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:265:18]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16]
wire [8:0] _expOut_T_18 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:277:16]
wire [8:0] _expOut_T_20 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:278:16]
wire [22:0] _fractOut_T_2 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:281:16, :284:13]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:281:16, :284:13]
wire [1:0] _io_exceptionFlags_T = 2'h0; // @[RoundAnyRawFNToRecFN.scala:288:23]
wire [2:0] _io_exceptionFlags_T_1 = 3'h0; // @[RoundAnyRawFNToRecFN.scala:288:41]
wire [3:0] _io_exceptionFlags_T_2 = 4'h0; // @[RoundAnyRawFNToRecFN.scala:288:53]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _commonCase_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:22]
wire _commonCase_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:36]
wire _commonCase_T_2 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:33]
wire io_invalidExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire common_overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire common_totalUnderflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire common_underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _unboundedRange_anyRound_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:205:30]
wire isNaNOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:235:34]
wire notNaN_isSpecialInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:236:49]
wire overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:238:32]
wire underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:239:32]
wire _pegMinNonzeroMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:20]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire _notNaN_isInfOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:45]
wire notNaN_isInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:32]
wire _expOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :253:32]
wire _fractOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :280:22]
wire signOut = io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :250:22]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = io_roundingMode_0 == 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :90:53, :288:41]
wire roundingMode_minMag = io_roundingMode_0 == 3'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :91:53]
wire roundingMode_min = io_roundingMode_0 == 3'h2; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53]
wire roundingMode_max = io_roundingMode_0 == 3'h3; // @[RoundAnyRawFNToRecFN.scala:48:5, :93:53]
wire roundingMode_near_maxMag = io_roundingMode_0 == 3'h4; // @[RoundAnyRawFNToRecFN.scala:48:5, :94:53]
wire roundingMode_odd = io_roundingMode_0 == 3'h6; // @[RoundAnyRawFNToRecFN.scala:48:5, :95:53]
wire _roundMagUp_T = roundingMode_min & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53, :98:27]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire _roundMagUp_T_2 = roundingMode_max & _roundMagUp_T_1; // @[RoundAnyRawFNToRecFN.scala:93:53, :98:{63,66}]
wire roundMagUp = _roundMagUp_T | _roundMagUp_T_2; // @[RoundAnyRawFNToRecFN.scala:98:{27,42,63}]
wire [9:0] _sAdjustedExp_T = {io_in_sExp_0[8], io_in_sExp_0} + 10'h80; // @[RoundAnyRawFNToRecFN.scala:48:5, :104:25]
wire [8:0] _sAdjustedExp_T_1 = _sAdjustedExp_T[8:0]; // @[RoundAnyRawFNToRecFN.scala:104:25, :106:14]
wire [9:0] sAdjustedExp = {1'h0, _sAdjustedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:106:{14,31}]
wire [25:0] _adjustedSig_T = io_in_sig_0[64:39]; // @[RoundAnyRawFNToRecFN.scala:48:5, :116:23]
wire [38:0] _adjustedSig_T_1 = io_in_sig_0[38:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :117:26]
wire _adjustedSig_T_2 = |_adjustedSig_T_1; // @[RoundAnyRawFNToRecFN.scala:117:{26,60}]
wire [26:0] adjustedSig = {_adjustedSig_T, _adjustedSig_T_2}; // @[RoundAnyRawFNToRecFN.scala:116:{23,66}, :117:60]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [26:0] _roundPosBit_T = adjustedSig & 27'h2; // @[RoundAnyRawFNToRecFN.scala:116:66, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire [26:0] _anyRoundExtra_T = adjustedSig & 27'h1; // @[RoundAnyRawFNToRecFN.scala:116:66, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
assign _common_inexact_T = anyRound; // @[RoundAnyRawFNToRecFN.scala:166:36, :230:49]
wire _GEN = roundingMode_near_even | roundingMode_near_maxMag; // @[RoundAnyRawFNToRecFN.scala:90:53, :94:53, :169:38]
wire _roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:169:38]
assign _roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:207:38]
assign _unboundedRange_roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :207:38]
wire _overflow_roundMagUp_T; // @[RoundAnyRawFNToRecFN.scala:243:32]
assign _overflow_roundMagUp_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :243:32]
wire _roundIncr_T_1 = _roundIncr_T & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:{38,67}]
wire _roundIncr_T_2 = roundMagUp & anyRound; // @[RoundAnyRawFNToRecFN.scala:98:42, :166:36, :171:29]
wire roundIncr = _roundIncr_T_1 | _roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31, :171:29]
wire [26:0] _roundedSig_T = adjustedSig | 27'h3; // @[RoundAnyRawFNToRecFN.scala:116:66, :153:55, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}, :177:35, :181:67]
wire _roundedSig_T_3 = roundingMode_near_even & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:90:53, :164:56, :175:49]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_7 = {25'h0, _roundedSig_T_5}; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_11 = adjustedSig & 27'h7FFFFFC; // @[RoundAnyRawFNToRecFN.scala:116:66, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire _roundedSig_T_13 = roundingMode_odd & anyRound; // @[RoundAnyRawFNToRecFN.scala:95:53, :166:36, :181:42]
wire [25:0] _roundedSig_T_15 = {25'h0, _roundedSig_T_13}; // @[RoundAnyRawFNToRecFN.scala:181:{24,42}]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12} | _roundedSig_T_15; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}, :181:24]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [10:0] sRoundedExp = {sAdjustedExp[9], sAdjustedExp} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:106:31, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:189:16, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:61]
wire unboundedRange_roundPosBit = _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:203:{16,61}]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:116:66, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{49,70}]
wire _unboundedRange_roundIncr_T_1 = _unboundedRange_roundIncr_T & unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:{38,67}]
wire _unboundedRange_roundIncr_T_2 = roundMagUp & unboundedRange_anyRound; // @[RoundAnyRawFNToRecFN.scala:98:42, :205:49, :209:29]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1 | _unboundedRange_roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46, :209:29]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:211:16, :213:27]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{61,64}]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = _inexact_T; // @[RoundAnyRawFNToRecFN.scala:240:{28,43}]
wire overflow_roundMagUp = _overflow_roundMagUp_T | roundMagUp; // @[RoundAnyRawFNToRecFN.scala:98:42, :243:{32,60}]
wire _pegMinNonzeroMagOut_T_1 = roundMagUp | roundingMode_odd; // @[RoundAnyRawFNToRecFN.scala:95:53, :98:42, :245:60]
wire _pegMaxFiniteMagOut_T = ~overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:243:60, :246:42]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
wire [8:0] _expOut_T_13 = _expOut_T_10; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17]
wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
wire [8:0] _expOut_T_19 = _expOut_T_17; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15]
wire [8:0] expOut = _expOut_T_19; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73]
wire _fractOut_T_1 = _fractOut_T; // @[RoundAnyRawFNToRecFN.scala:280:{22,38}]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? 23'h0 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16, :284:13]
wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
assign _io_exceptionFlags_T_3 = {4'h0, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
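  // Example (illustrative, not part of the original file): for a UInt x,
  // x.isOneOf(1.U, 3.U, 7.U) elaborates to (x === 1.U) || (x === 3.U) || (x === 7.U).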
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
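  // Examples (illustrative, not part of the original file):
  //   Seq(a, b, c, d).rotate(1)      == Seq(b, c, d, a)
  //   Seq(a, b, c, d).rotateRight(1) == Seq(d, a, b, c)
  // The UInt-amount overloads build the equivalent Mux network and require a
  // power-of-2 length.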
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
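// Worked examples (derived by hand from the definitions above, shown as an aid):
//   "RocketTile".underscore        // "rocket_tile"
//   "Rocket Tile".kebab            // "rocket-tile"
//   "dcache".named(Some("core0"))  // "dcache_named_core0"
//   "dcache".named(None)           // "dcache_with_no_name"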
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
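// Worked examples (assumed 8-bit value, shown as an aid to the helpers above):
//   val x = "b1011_0110".U(8.W)
//   x.extract(3, 0)     // "b0110".U; x.extract(3, 4) is the zero-width case and yields 0.U
//   x.grouped(4)        // Seq("b0110".U, "b1011".U), lowest group first
//   x.rotateRight(4)    // "b0110_1011".U
//   x.sextTo(12)        // "b1111_1011_0110".U (sign bit replicated)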
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
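// Worked examples (shown as an aid): "OH1" is the all-ones-below form 2^k - 1 rather
// than a one-hot bit, so it encodes a power-of-two size as a low-order mask.
//   UIntToOH1(3.U, 8)      // "b0000_0111".U
//   OH1ToOH("b0111".U)     // "b1000".U
//   OH1ToUInt("b0111".U)   // 3.U
//   trailingZeros(8)       // Some(3); trailingZeros(0) == None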
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
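// Worked example (shown as an aid): every bit at or above the lowest set bit becomes 1.
//   leftOR("b0010_0100".U)   // "b1111_1100".U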
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
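// Worked example (shown as an aid): every bit at or below the highest set bit becomes 1.
//   rightOR("b0010_0100".U)  // "b0011_1111".U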
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
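// Illustrative usage (sketch): wrapping a signal in OptimizationBarrier instantiates a
// pass-through module, which keeps synthesis from merging the wrapped copy with other
// logically-equivalent signals (e.g. to preserve a deliberately duplicated register).
//   val paddr_dup = OptimizationBarrier(paddr)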
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
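// Worked example (shown as an aid): keys appear in first-encounter order, unlike groupBy.
//   groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2)
//   // Seq(1 -> List(1, 3, 5), 0 -> List(2, 4))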
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
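// Worked example (shown as an aid): a single global setting is replicated, an exact-length
// Seq is passed through, and any other length throws.
//   heterogeneousOrGlobalSetting(Seq(4), 3)        // Seq(4, 4, 4)
//   heterogeneousOrGlobalSetting(Seq(4, 8, 2), 3)  // Seq(4, 8, 2)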
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File DCache.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.amba.AMBAProt
import freechips.rocketchip.diplomacy.{BufferParams}
import freechips.rocketchip.prci.{ClockCrossingType, RationalCrossing, SynchronousCrossing, AsynchronousCrossing, CreditedCrossing}
import freechips.rocketchip.tile.{CoreBundle, LookupByHartId}
import freechips.rocketchip.tilelink.{TLFIFOFixer,ClientMetadata, TLBundleA, TLAtomics, TLBundleB, TLPermissions}
import freechips.rocketchip.tilelink.TLMessages.{AccessAck, HintAck, AccessAckData, Grant, GrantData, ReleaseAck}
import freechips.rocketchip.util.{CanHaveErrors, ClockGate, IdentityCode, ReplacementPolicy, DescribedSRAM, property}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
import freechips.rocketchip.util.UIntToAugmentedUInt
import freechips.rocketchip.util.UIntIsOneOf
import freechips.rocketchip.util.IntToAugmentedInt
import freechips.rocketchip.util.SeqToAugmentedSeq
import freechips.rocketchip.util.SeqBoolBitwiseOps
// TODO: delete this trait once deduplication is smart enough to avoid globally inlining matching circuits
trait InlineInstance { self: chisel3.experimental.BaseModule =>
chisel3.experimental.annotate(
new chisel3.experimental.ChiselAnnotation {
def toFirrtl: firrtl.annotations.Annotation = firrtl.passes.InlineAnnotation(self.toNamed) } )
}
class DCacheErrors(implicit p: Parameters) extends L1HellaCacheBundle()(p)
with CanHaveErrors {
val correctable = (cacheParams.tagCode.canCorrect || cacheParams.dataCode.canCorrect).option(Valid(UInt(paddrBits.W)))
val uncorrectable = (cacheParams.tagCode.canDetect || cacheParams.dataCode.canDetect).option(Valid(UInt(paddrBits.W)))
val bus = Valid(UInt(paddrBits.W))
}
class DCacheDataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val addr = UInt(untagBits.W)
val write = Bool()
val wdata = UInt((encBits * rowBytes / eccBytes).W)
val wordMask = UInt((rowBytes / subWordBytes).W)
val eccMask = UInt((wordBytes / eccBytes).W)
val way_en = UInt(nWays.W)
}
class DCacheDataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Valid(new DCacheDataReq))
val resp = Output(Vec(nWays, UInt((req.bits.wdata.getWidth).W)))
})
require(rowBits % subWordBits == 0, "rowBits must be a multiple of subWordBits")
val eccMask = if (eccBits == subWordBits) Seq(true.B) else io.req.bits.eccMask.asBools
val wMask = if (nWays == 1) eccMask else (0 until nWays).flatMap(i => eccMask.map(_ && io.req.bits.way_en(i)))
val wWords = io.req.bits.wdata.grouped(encBits * (subWordBits / eccBits))
val addr = io.req.bits.addr >> rowOffBits
val data_arrays = Seq.tabulate(rowBits / subWordBits) {
i =>
DescribedSRAM(
name = s"${tileParams.baseName}_dcache_data_arrays_${i}",
desc = "DCache Data Array",
size = nSets * cacheBlockBytes / rowBytes,
data = Vec(nWays * (subWordBits / eccBits), UInt(encBits.W))
)
}
val rdata = for ((array , i) <- data_arrays.zipWithIndex) yield {
val valid = io.req.valid && ((data_arrays.size == 1).B || io.req.bits.wordMask(i))
when (valid && io.req.bits.write) {
val wMaskSlice = (0 until wMask.size).filter(j => i % (wordBits/subWordBits) == (j % (wordBytes/eccBytes)) / (subWordBytes/eccBytes)).map(wMask(_))
val wData = wWords(i).grouped(encBits)
array.write(addr, VecInit((0 until nWays).flatMap(i => wData)), wMaskSlice)
}
val data = array.read(addr, valid && !io.req.bits.write)
data.grouped(subWordBits / eccBits).map(_.asUInt).toSeq
}
(io.resp zip rdata.transpose).foreach { case (resp, data) => resp := data.asUInt }
}
class DCacheMetadataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val write = Bool()
val addr = UInt(vaddrBitsExtended.W)
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val data = UInt(cacheParams.tagCode.width(new L1Metadata().getWidth).W)
}
class DCache(staticIdForMetadataUseOnly: Int, val crossing: ClockCrossingType)(implicit p: Parameters) extends HellaCache(staticIdForMetadataUseOnly)(p) {
override lazy val module = new DCacheModule(this)
}
class DCacheTLBPort(implicit p: Parameters) extends CoreBundle()(p) {
val req = Flipped(Decoupled(new TLBReq(coreDataBytes.log2)))
val s1_resp = Output(new TLBResp(coreDataBytes.log2))
val s2_kill = Input(Bool())
}
class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
val tECC = cacheParams.tagCode
val dECC = cacheParams.dataCode
require(subWordBits % eccBits == 0, "subWordBits must be a multiple of eccBits")
require(eccBytes == 1 || !dECC.isInstanceOf[IdentityCode])
require(cacheParams.silentDrop || cacheParams.acquireBeforeRelease, "!silentDrop requires acquireBeforeRelease")
val usingRMW = eccBytes > 1 || usingAtomicsInCache
val mmioOffset = outer.firstMMIO
edge.manager.requireFifo(TLFIFOFixer.allVolatile) // TileLink pipelining MMIO requests
val clock_en_reg = Reg(Bool())
io.cpu.clock_enabled := clock_en_reg
val gated_clock =
if (!cacheParams.clockGate) clock
else ClockGate(clock, clock_en_reg, "dcache_clock_gate")
class DCacheModuleImpl { // entering gated-clock domain
val tlb = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages)))
val pma_checker = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages)) with InlineInstance)
// tags
val replacer = ReplacementPolicy.fromString(cacheParams.replacementPolicy, nWays)
/** Metadata Arbiter:
* 0: Tag update on reset
* 1: Tag update on ECC error
* 2: Tag update on hit
* 3: Tag update on refill
* 4: Tag update on release
* 5: Tag update on flush
* 6: Tag update on probe
* 7: Tag update on CPU request
*/
val metaArb = Module(new Arbiter(new DCacheMetadataReq, 8) with InlineInstance)
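// Note (based on chisel3.util.Arbiter's fixed-priority behavior, stated here as an aid):
// lower-numbered ports win, so reset (0) and tag-ECC repair (1) preempt CPU requests (7).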
val tag_array = DescribedSRAM(
name = s"${tileParams.baseName}_dcache_tag_array",
desc = "DCache Tag Array",
size = nSets,
data = Vec(nWays, chiselTypeOf(metaArb.io.out.bits.data))
)
// data
val data = Module(new DCacheDataArray)
/** Data Arbiter
* 0: data from pending store buffer
* 1: data from TL-D refill
* 2: release to TL-A
* 3: hit path to CPU
*/
val dataArb = Module(new Arbiter(new DCacheDataReq, 4) with InlineInstance)
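// Note (same fixed-priority Arbiter behavior, stated as an aid): pending-store drains (0)
// and refill data (1) take precedence over the CPU hit-path read (3).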
dataArb.io.in.tail.foreach(_.bits.wdata := dataArb.io.in.head.bits.wdata) // tie off write ports by default
data.io.req.bits <> dataArb.io.out.bits
data.io.req.valid := dataArb.io.out.valid
dataArb.io.out.ready := true.B
metaArb.io.out.ready := clock_en_reg
val tl_out_a = Wire(chiselTypeOf(tl_out.a))
tl_out.a <> {
val a_queue_depth = outer.crossing match {
case RationalCrossing(_) => // TODO make this depend on the actual ratio?
if (cacheParams.separateUncachedResp) (maxUncachedInFlight + 1) / 2
else 2 min maxUncachedInFlight-1
case SynchronousCrossing(BufferParams.none) => 1 // Need some buffering to guarantee livelock freedom
case SynchronousCrossing(_) => 0 // Adequate buffering within the crossing
case _: AsynchronousCrossing => 0 // Adequate buffering within the crossing
case _: CreditedCrossing => 0 // Adequate buffering within the crossing
}
Queue(tl_out_a, a_queue_depth, flow = true)
}
val (tl_out_c, release_queue_empty) =
if (cacheParams.acquireBeforeRelease) {
val q = Module(new Queue(chiselTypeOf(tl_out.c.bits), cacheDataBeats, flow = true))
tl_out.c <> q.io.deq
(q.io.enq, q.io.count === 0.U)
} else {
(tl_out.c, true.B)
}
val s1_valid = RegNext(io.cpu.req.fire, false.B)
val s1_probe = RegNext(tl_out.b.fire, false.B)
val probe_bits = RegEnable(tl_out.b.bits, tl_out.b.fire) // TODO has data now :(
val s1_nack = WireDefault(false.B)
val s1_valid_masked = s1_valid && !io.cpu.s1_kill
val s1_valid_not_nacked = s1_valid && !s1_nack
val s1_tlb_req_valid = RegNext(io.tlb_port.req.fire, false.B)
val s2_tlb_req_valid = RegNext(s1_tlb_req_valid, false.B)
val s0_clk_en = metaArb.io.out.valid && !metaArb.io.out.bits.write
val s0_req = WireInit(io.cpu.req.bits)
s0_req.addr := Cat(metaArb.io.out.bits.addr >> blockOffBits, io.cpu.req.bits.addr(blockOffBits-1,0))
s0_req.idx.foreach(_ := Cat(metaArb.io.out.bits.idx, s0_req.addr(blockOffBits-1, 0)))
when (!metaArb.io.in(7).ready) { s0_req.phys := true.B }
val s1_req = RegEnable(s0_req, s0_clk_en)
val s1_vaddr = Cat(s1_req.idx.getOrElse(s1_req.addr) >> tagLSB, s1_req.addr(tagLSB-1, 0))
val s0_tlb_req = WireInit(io.tlb_port.req.bits)
when (!io.tlb_port.req.fire) {
s0_tlb_req.passthrough := s0_req.phys
s0_tlb_req.vaddr := s0_req.addr
s0_tlb_req.size := s0_req.size
s0_tlb_req.cmd := s0_req.cmd
s0_tlb_req.prv := s0_req.dprv
s0_tlb_req.v := s0_req.dv
}
val s1_tlb_req = RegEnable(s0_tlb_req, s0_clk_en || io.tlb_port.req.valid)
val s1_read = isRead(s1_req.cmd)
val s1_write = isWrite(s1_req.cmd)
val s1_readwrite = s1_read || s1_write
val s1_sfence = s1_req.cmd === M_SFENCE || s1_req.cmd === M_HFENCEV || s1_req.cmd === M_HFENCEG
val s1_flush_line = s1_req.cmd === M_FLUSH_ALL && s1_req.size(0)
val s1_flush_valid = Reg(Bool())
val s1_waw_hazard = Wire(Bool())
val s_ready :: s_voluntary_writeback :: s_probe_rep_dirty :: s_probe_rep_clean :: s_probe_retry :: s_probe_rep_miss :: s_voluntary_write_meta :: s_probe_write_meta :: s_dummy :: s_voluntary_release :: Nil = Enum(10)
val supports_flush = outer.flushOnFenceI || coreParams.haveCFlush
val flushed = RegInit(true.B)
val flushing = RegInit(false.B)
val flushing_req = Reg(chiselTypeOf(s1_req))
val cached_grant_wait = RegInit(false.B)
val resetting = RegInit(false.B)
val flushCounter = RegInit((nSets * (nWays-1)).U(log2Ceil(nSets * nWays).W))
val release_ack_wait = RegInit(false.B)
val release_ack_addr = Reg(UInt(paddrBits.W))
val release_state = RegInit(s_ready)
val refill_way = Reg(UInt())
val any_pstore_valid = Wire(Bool())
val inWriteback = release_state.isOneOf(s_voluntary_writeback, s_probe_rep_dirty)
val releaseWay = Wire(UInt())
io.cpu.req.ready := (release_state === s_ready) && !cached_grant_wait && !s1_nack
// I/O MSHRs
val uncachedInFlight = RegInit(VecInit(Seq.fill(maxUncachedInFlight)(false.B)))
val uncachedReqs = Reg(Vec(maxUncachedInFlight, new HellaCacheReq))
val uncachedResp = WireInit(new HellaCacheReq, DontCare)
// hit initiation path
val s0_read = isRead(io.cpu.req.bits.cmd)
dataArb.io.in(3).valid := io.cpu.req.valid && likelyNeedsRead(io.cpu.req.bits)
dataArb.io.in(3).bits := dataArb.io.in(1).bits
dataArb.io.in(3).bits.write := false.B
dataArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.idx.getOrElse(io.cpu.req.bits.addr) >> tagLSB, io.cpu.req.bits.addr(tagLSB-1, 0))
dataArb.io.in(3).bits.wordMask := {
val mask = (subWordBytes.log2 until rowOffBits).foldLeft(1.U) { case (in, i) =>
val upper_mask = Mux((i >= wordBytes.log2).B || io.cpu.req.bits.size <= i.U, 0.U,
((BigInt(1) << (1 << (i - subWordBytes.log2)))-1).U)
val upper = Mux(io.cpu.req.bits.addr(i), in, 0.U) | upper_mask
val lower = Mux(io.cpu.req.bits.addr(i), 0.U, in)
upper ## lower
}
Fill(subWordBytes / eccBytes, mask)
}
dataArb.io.in(3).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
dataArb.io.in(3).bits.way_en := ~0.U(nWays.W)
when (!dataArb.io.in(3).ready && s0_read) { io.cpu.req.ready := false.B }
val s1_did_read = RegEnable(dataArb.io.in(3).ready && (io.cpu.req.valid && needsRead(io.cpu.req.bits)), s0_clk_en)
val s1_read_mask = RegEnable(dataArb.io.in(3).bits.wordMask, s0_clk_en)
metaArb.io.in(7).valid := io.cpu.req.valid
metaArb.io.in(7).bits.write := false.B
metaArb.io.in(7).bits.idx := dataArb.io.in(3).bits.addr(idxMSB, idxLSB)
metaArb.io.in(7).bits.addr := io.cpu.req.bits.addr
metaArb.io.in(7).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(7).bits.data := metaArb.io.in(4).bits.data
when (!metaArb.io.in(7).ready) { io.cpu.req.ready := false.B }
// address translation
val s1_cmd_uses_tlb = s1_readwrite || s1_flush_line || s1_req.cmd === M_WOK
io.ptw <> tlb.io.ptw
tlb.io.kill := io.cpu.s2_kill || s2_tlb_req_valid && io.tlb_port.s2_kill
tlb.io.req.valid := s1_tlb_req_valid || s1_valid && !io.cpu.s1_kill && s1_cmd_uses_tlb
tlb.io.req.bits := s1_tlb_req
when (!tlb.io.req.ready && !tlb.io.ptw.resp.valid && !io.cpu.req.bits.phys) { io.cpu.req.ready := false.B }
when (!s1_tlb_req_valid && s1_valid && s1_cmd_uses_tlb && tlb.io.resp.miss) { s1_nack := true.B }
tlb.io.sfence.valid := s1_valid && !io.cpu.s1_kill && s1_sfence
tlb.io.sfence.bits.rs1 := s1_req.size(0)
tlb.io.sfence.bits.rs2 := s1_req.size(1)
tlb.io.sfence.bits.asid := io.cpu.s1_data.data
tlb.io.sfence.bits.addr := s1_req.addr
tlb.io.sfence.bits.hv := s1_req.cmd === M_HFENCEV
tlb.io.sfence.bits.hg := s1_req.cmd === M_HFENCEG
io.tlb_port.req.ready := clock_en_reg
io.tlb_port.s1_resp := tlb.io.resp
when (s1_tlb_req_valid && s1_valid && !(s1_req.phys && s1_req.no_xcpt)) { s1_nack := true.B }
pma_checker.io <> DontCare
pma_checker.io.req.bits.passthrough := true.B
pma_checker.io.req.bits.vaddr := s1_req.addr
pma_checker.io.req.bits.size := s1_req.size
pma_checker.io.req.bits.cmd := s1_req.cmd
pma_checker.io.req.bits.prv := s1_req.dprv
pma_checker.io.req.bits.v := s1_req.dv
val s1_paddr = Cat(Mux(s1_tlb_req_valid, s1_req.addr(paddrBits-1, pgIdxBits), tlb.io.resp.paddr >> pgIdxBits), s1_req.addr(pgIdxBits-1, 0))
val s1_victim_way = Wire(UInt())
val (s1_hit_way, s1_hit_state, s1_meta) =
if (usingDataScratchpad) {
val baseAddr = p(LookupByHartId)(_.dcache.flatMap(_.scratch.map(_.U)), io_hartid.get) | io_mmio_address_prefix.get
val inScratchpad = s1_paddr >= baseAddr && s1_paddr < baseAddr + (nSets * cacheBlockBytes).U
val hitState = Mux(inScratchpad, ClientMetadata.maximum, ClientMetadata.onReset)
val dummyMeta = L1Metadata(0.U, ClientMetadata.onReset)
(inScratchpad, hitState, Seq(tECC.encode(dummyMeta.asUInt)))
} else {
val metaReq = metaArb.io.out
val metaIdx = metaReq.bits.idx
when (metaReq.valid && metaReq.bits.write) {
val wmask = if (nWays == 1) Seq(true.B) else metaReq.bits.way_en.asBools
tag_array.write(metaIdx, VecInit(Seq.fill(nWays)(metaReq.bits.data)), wmask)
}
val s1_meta = tag_array.read(metaIdx, metaReq.valid && !metaReq.bits.write)
val s1_meta_uncorrected = s1_meta.map(tECC.decode(_).uncorrected.asTypeOf(new L1Metadata))
val s1_tag = s1_paddr >> tagLSB
val s1_meta_hit_way = s1_meta_uncorrected.map(r => r.coh.isValid() && r.tag === s1_tag).asUInt
val s1_meta_hit_state = (
s1_meta_uncorrected.map(r => Mux(r.tag === s1_tag && !s1_flush_valid, r.coh.asUInt, 0.U))
.reduce (_|_)).asTypeOf(chiselTypeOf(ClientMetadata.onReset))
(s1_meta_hit_way, s1_meta_hit_state, s1_meta)
}
val s1_data_way = WireDefault(if (nWays == 1) 1.U else Mux(inWriteback, releaseWay, s1_hit_way))
val tl_d_data_encoded = Wire(chiselTypeOf(encodeData(tl_out.d.bits.data, false.B)))
val s1_all_data_ways = VecInit(data.io.resp ++ (!cacheParams.separateUncachedResp).option(tl_d_data_encoded))
val s1_mask_xwr = new StoreGen(s1_req.size, s1_req.addr, 0.U, wordBytes).mask
val s1_mask = Mux(s1_req.cmd === M_PWR, io.cpu.s1_data.mask, s1_mask_xwr)
// for partial writes, s1_data.mask must be a subset of s1_mask_xwr
assert(!(s1_valid_masked && s1_req.cmd === M_PWR) || (s1_mask_xwr | ~io.cpu.s1_data.mask).andR)
val s2_valid = RegNext(s1_valid_masked && !s1_sfence, init=false.B)
val s2_valid_no_xcpt = s2_valid && !io.cpu.s2_xcpt.asUInt.orR
val s2_probe = RegNext(s1_probe, init=false.B)
val releaseInFlight = s1_probe || s2_probe || release_state =/= s_ready
val s2_not_nacked_in_s1 = RegNext(!s1_nack)
val s2_valid_not_nacked_in_s1 = s2_valid && s2_not_nacked_in_s1
val s2_valid_masked = s2_valid_no_xcpt && s2_not_nacked_in_s1
val s2_valid_not_killed = s2_valid_masked && !io.cpu.s2_kill
val s2_req = Reg(chiselTypeOf(io.cpu.req.bits))
val s2_cmd_flush_all = s2_req.cmd === M_FLUSH_ALL && !s2_req.size(0)
val s2_cmd_flush_line = s2_req.cmd === M_FLUSH_ALL && s2_req.size(0)
val s2_tlb_xcpt = Reg(chiselTypeOf(tlb.io.resp))
val s2_pma = Reg(chiselTypeOf(tlb.io.resp))
val s2_uncached_resp_addr = Reg(chiselTypeOf(s2_req.addr)) // should be DCE'd in synthesis
when (s1_valid_not_nacked || s1_flush_valid) {
s2_req := s1_req
s2_req.addr := s1_paddr
s2_tlb_xcpt := tlb.io.resp
s2_pma := Mux(s1_tlb_req_valid, pma_checker.io.resp, tlb.io.resp)
}
val s2_vaddr = Cat(RegEnable(s1_vaddr, s1_valid_not_nacked || s1_flush_valid) >> tagLSB, s2_req.addr(tagLSB-1, 0))
val s2_read = isRead(s2_req.cmd)
val s2_write = isWrite(s2_req.cmd)
val s2_readwrite = s2_read || s2_write
val s2_flush_valid_pre_tag_ecc = RegNext(s1_flush_valid)
val s1_meta_decoded = s1_meta.map(tECC.decode(_))
val s1_meta_clk_en = s1_valid_not_nacked || s1_flush_valid || s1_probe
val s2_meta_correctable_errors = s1_meta_decoded.map(m => RegEnable(m.correctable, s1_meta_clk_en)).asUInt
val s2_meta_uncorrectable_errors = s1_meta_decoded.map(m => RegEnable(m.uncorrectable, s1_meta_clk_en)).asUInt
val s2_meta_error_uncorrectable = s2_meta_uncorrectable_errors.orR
val s2_meta_corrected = s1_meta_decoded.map(m => RegEnable(m.corrected, s1_meta_clk_en).asTypeOf(new L1Metadata))
val s2_meta_error = (s2_meta_uncorrectable_errors | s2_meta_correctable_errors).orR
val s2_flush_valid = s2_flush_valid_pre_tag_ecc && !s2_meta_error
val s2_data = {
val wordsPerRow = rowBits / subWordBits
val en = s1_valid || inWriteback || io.cpu.replay_next
val word_en = Mux(inWriteback, Fill(wordsPerRow, 1.U), Mux(s1_did_read, s1_read_mask, 0.U))
val s1_way_words = s1_all_data_ways.map(_.grouped(dECC.width(eccBits) * (subWordBits / eccBits)))
if (cacheParams.pipelineWayMux) {
val s1_word_en = Mux(io.cpu.replay_next, 0.U, word_en)
(for (i <- 0 until wordsPerRow) yield {
val s2_way_en = RegEnable(Mux(s1_word_en(i), s1_data_way, 0.U), en)
val s2_way_words = (0 until nWays).map(j => RegEnable(s1_way_words(j)(i), en && word_en(i)))
(0 until nWays).map(j => Mux(s2_way_en(j), s2_way_words(j), 0.U)).reduce(_|_)
}).asUInt
} else {
val s1_word_en = Mux(!io.cpu.replay_next, word_en, UIntToOH(uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)), wordsPerRow))
(for (i <- 0 until wordsPerRow) yield {
RegEnable(Mux1H(Mux(s1_word_en(i), s1_data_way, 0.U), s1_way_words.map(_(i))), en)
}).asUInt
}
}
val s2_probe_way = RegEnable(s1_hit_way, s1_probe)
val s2_probe_state = RegEnable(s1_hit_state, s1_probe)
val s2_hit_way = RegEnable(s1_hit_way, s1_valid_not_nacked)
val s2_hit_state = RegEnable(s1_hit_state, s1_valid_not_nacked || s1_flush_valid)
val s2_waw_hazard = RegEnable(s1_waw_hazard, s1_valid_not_nacked)
val s2_store_merge = Wire(Bool())
val s2_hit_valid = s2_hit_state.isValid()
val (s2_hit, s2_grow_param, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd)
val s2_data_decoded = decodeData(s2_data)
val s2_word_idx = s2_req.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes))
val s2_data_error = s2_data_decoded.map(_.error).orR
val s2_data_error_uncorrectable = s2_data_decoded.map(_.uncorrectable).orR
val s2_data_corrected = (s2_data_decoded.map(_.corrected): Seq[UInt]).asUInt
val s2_data_uncorrected = (s2_data_decoded.map(_.uncorrected): Seq[UInt]).asUInt
val s2_valid_hit_maybe_flush_pre_data_ecc_and_waw = s2_valid_masked && !s2_meta_error && s2_hit
val s2_no_alloc_hazard = if (!usingVM || pgIdxBits >= untagBits) false.B else {
// make sure that any in-flight non-allocating accesses are ordered before
// any allocating accesses. this can only happen if aliasing is possible.
val any_no_alloc_in_flight = Reg(Bool())
when (!uncachedInFlight.asUInt.orR) { any_no_alloc_in_flight := false.B }
when (s2_valid && s2_req.no_alloc) { any_no_alloc_in_flight := true.B }
val s1_need_check = any_no_alloc_in_flight || s2_valid && s2_req.no_alloc
val concerns = (uncachedInFlight zip uncachedReqs) :+ (s2_valid && s2_req.no_alloc, s2_req)
val s1_uncached_hits = concerns.map { c =>
val concern_wmask = new StoreGen(c._2.size, c._2.addr, 0.U, wordBytes).mask
val addr_match = (c._2.addr ^ s1_paddr)(pgIdxBits+pgLevelBits-1, wordBytes.log2) === 0.U
val mask_match = (concern_wmask & s1_mask_xwr).orR || c._2.cmd === M_PWR || s1_req.cmd === M_PWR
val cmd_match = isWrite(c._2.cmd) || isWrite(s1_req.cmd)
c._1 && s1_need_check && cmd_match && addr_match && mask_match
}
val s2_uncached_hits = RegEnable(s1_uncached_hits.asUInt, s1_valid_not_nacked)
s2_uncached_hits.orR
}
val s2_valid_hit_pre_data_ecc_and_waw = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_readwrite && !s2_no_alloc_hazard
val s2_valid_flush_line = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_cmd_flush_line
val s2_valid_hit_pre_data_ecc = s2_valid_hit_pre_data_ecc_and_waw && (!s2_waw_hazard || s2_store_merge)
val s2_valid_data_error = s2_valid_hit_pre_data_ecc_and_waw && s2_data_error
val s2_valid_hit = s2_valid_hit_pre_data_ecc && !s2_data_error
val s2_valid_miss = s2_valid_masked && s2_readwrite && !s2_meta_error && !s2_hit
val s2_uncached = !s2_pma.cacheable || s2_req.no_alloc && !s2_pma.must_alloc && !s2_hit_valid
val s2_valid_cached_miss = s2_valid_miss && !s2_uncached && !uncachedInFlight.asUInt.orR
dontTouch(s2_valid_cached_miss)
val s2_want_victimize = (!usingDataScratchpad).B && (s2_valid_cached_miss || s2_valid_flush_line || s2_valid_data_error || s2_flush_valid)
val s2_cannot_victimize = !s2_flush_valid && io.cpu.s2_kill
val s2_victimize = s2_want_victimize && !s2_cannot_victimize
val s2_valid_uncached_pending = s2_valid_miss && s2_uncached && !uncachedInFlight.asUInt.andR
val s2_victim_way = UIntToOH(RegEnable(s1_victim_way, s1_valid_not_nacked || s1_flush_valid))
val s2_victim_or_hit_way = Mux(s2_hit_valid, s2_hit_way, s2_victim_way)
val s2_victim_tag = Mux(s2_valid_data_error || s2_valid_flush_line, s2_req.addr(paddrBits-1, tagLSB), Mux1H(s2_victim_way, s2_meta_corrected).tag)
val s2_victim_state = Mux(s2_hit_valid, s2_hit_state, Mux1H(s2_victim_way, s2_meta_corrected).coh)
val (s2_prb_ack_data, s2_report_param, probeNewCoh)= s2_probe_state.onProbe(probe_bits.param)
val (s2_victim_dirty, s2_shrink_param, voluntaryNewCoh) = s2_victim_state.onCacheControl(M_FLUSH)
dontTouch(s2_victim_dirty)
val s2_update_meta = s2_hit_state =/= s2_new_hit_state
val s2_dont_nack_uncached = s2_valid_uncached_pending && tl_out_a.ready
val s2_dont_nack_misc = s2_valid_masked && !s2_meta_error &&
(supports_flush.B && s2_cmd_flush_all && flushed && !flushing ||
supports_flush.B && s2_cmd_flush_line && !s2_hit ||
s2_req.cmd === M_WOK)
io.cpu.s2_nack := s2_valid_no_xcpt && !s2_dont_nack_uncached && !s2_dont_nack_misc && !s2_valid_hit
when (io.cpu.s2_nack || (s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta)) { s1_nack := true.B }
// tag updates on ECC errors
val s2_first_meta_corrected = PriorityMux(s2_meta_correctable_errors, s2_meta_corrected)
metaArb.io.in(1).valid := s2_meta_error && (s2_valid_masked || s2_flush_valid_pre_tag_ecc || s2_probe)
metaArb.io.in(1).bits.write := true.B
metaArb.io.in(1).bits.way_en := s2_meta_uncorrectable_errors | Mux(s2_meta_error_uncorrectable, 0.U, PriorityEncoderOH(s2_meta_correctable_errors))
metaArb.io.in(1).bits.idx := Mux(s2_probe, probeIdx(probe_bits), s2_vaddr(idxMSB, idxLSB))
metaArb.io.in(1).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(1).bits.idx << blockOffBits)
metaArb.io.in(1).bits.data := tECC.encode {
val new_meta = WireDefault(s2_first_meta_corrected)
when (s2_meta_error_uncorrectable) { new_meta.coh := ClientMetadata.onReset }
new_meta.asUInt
}
// tag updates on hit
metaArb.io.in(2).valid := s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta
metaArb.io.in(2).bits.write := !io.cpu.s2_kill
metaArb.io.in(2).bits.way_en := s2_victim_or_hit_way
metaArb.io.in(2).bits.idx := s2_vaddr(idxMSB, idxLSB)
metaArb.io.in(2).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0))
metaArb.io.in(2).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_new_hit_state).asUInt)
// load reservations and TL error reporting
val s2_lr = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XLR
val s2_sc = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XSC
val lrscCount = RegInit(0.U)
val lrscValid = lrscCount > lrscBackoff.U
val lrscBackingOff = lrscCount > 0.U && !lrscValid
val lrscAddr = Reg(UInt())
val lrscAddrMatch = lrscAddr === (s2_req.addr >> blockOffBits)
val s2_sc_fail = s2_sc && !(lrscValid && lrscAddrMatch)
when ((s2_valid_hit && s2_lr && !cached_grant_wait || s2_valid_cached_miss) && !io.cpu.s2_kill) {
lrscCount := Mux(s2_hit, (lrscCycles - 1).U, 0.U)
lrscAddr := s2_req.addr >> blockOffBits
}
when (lrscCount > 0.U) { lrscCount := lrscCount - 1.U }
when (s2_valid_not_killed && lrscValid) { lrscCount := lrscBackoff.U }
when (s1_probe) { lrscCount := 0.U }
// don't perform data correction if it might clobber a recent store
val s2_correct = s2_data_error && !any_pstore_valid && !RegNext(any_pstore_valid || s2_valid) && usingDataScratchpad.B
// pending store buffer
val s2_valid_correct = s2_valid_hit_pre_data_ecc_and_waw && s2_correct && !io.cpu.s2_kill
def s2_store_valid_pre_kill = s2_valid_hit && s2_write && !s2_sc_fail
def s2_store_valid = s2_store_valid_pre_kill && !io.cpu.s2_kill
val pstore1_cmd = RegEnable(s1_req.cmd, s1_valid_not_nacked && s1_write)
val pstore1_addr = RegEnable(s1_vaddr, s1_valid_not_nacked && s1_write)
val pstore1_data = RegEnable(io.cpu.s1_data.data, s1_valid_not_nacked && s1_write)
val pstore1_way = RegEnable(s1_hit_way, s1_valid_not_nacked && s1_write)
val pstore1_mask = RegEnable(s1_mask, s1_valid_not_nacked && s1_write)
val pstore1_storegen_data = WireDefault(pstore1_data)
val pstore1_rmw = usingRMW.B && RegEnable(needsRead(s1_req), s1_valid_not_nacked && s1_write)
val pstore1_merge_likely = s2_valid_not_nacked_in_s1 && s2_write && s2_store_merge
val pstore1_merge = s2_store_valid && s2_store_merge
val pstore2_valid = RegInit(false.B)
val pstore_drain_opportunistic = !(io.cpu.req.valid && likelyNeedsRead(io.cpu.req.bits)) && !(s1_valid && s1_waw_hazard)
val pstore_drain_on_miss = releaseInFlight || RegNext(io.cpu.s2_nack)
val pstore1_held = RegInit(false.B)
val pstore1_valid_likely = s2_valid && s2_write || pstore1_held
def pstore1_valid_not_rmw(s2_kill: Bool) = s2_valid_hit_pre_data_ecc && s2_write && !s2_kill || pstore1_held
val pstore1_valid = s2_store_valid || pstore1_held
any_pstore_valid := pstore1_held || pstore2_valid
val pstore_drain_structural = pstore1_valid_likely && pstore2_valid && ((s1_valid && s1_write) || pstore1_rmw)
assert(pstore1_rmw || pstore1_valid_not_rmw(io.cpu.s2_kill) === pstore1_valid)
ccover(pstore_drain_structural, "STORE_STRUCTURAL_HAZARD", "D$ read-modify-write structural hazard")
ccover(pstore1_valid && pstore_drain_on_miss, "STORE_DRAIN_ON_MISS", "D$ store buffer drain on miss")
ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard")
def should_pstore_drain(truly: Bool) = {
val s2_kill = truly && io.cpu.s2_kill
!pstore1_merge_likely &&
(usingRMW.B && pstore_drain_structural ||
(((pstore1_valid_not_rmw(s2_kill) && !pstore1_rmw) || pstore2_valid) && (pstore_drain_opportunistic || pstore_drain_on_miss)))
}
val pstore_drain = should_pstore_drain(true.B)
pstore1_held := (s2_store_valid && !s2_store_merge || pstore1_held) && pstore2_valid && !pstore_drain
val advance_pstore1 = (pstore1_valid || s2_valid_correct) && (pstore2_valid === pstore_drain)
pstore2_valid := pstore2_valid && !pstore_drain || advance_pstore1
val pstore2_addr = RegEnable(Mux(s2_correct, s2_vaddr, pstore1_addr), advance_pstore1)
val pstore2_way = RegEnable(Mux(s2_correct, s2_hit_way, pstore1_way), advance_pstore1)
val pstore2_storegen_data = {
for (i <- 0 until wordBytes)
yield RegEnable(pstore1_storegen_data(8*(i+1)-1, 8*i), advance_pstore1 || pstore1_merge && pstore1_mask(i))
}.asUInt
val pstore2_storegen_mask = {
val mask = Reg(UInt(wordBytes.W))
when (advance_pstore1 || pstore1_merge) {
val mergedMask = pstore1_mask | Mux(pstore1_merge, mask, 0.U)
mask := ~Mux(s2_correct, 0.U, ~mergedMask)
}
mask
}
s2_store_merge := (if (eccBytes == 1) false.B else {
ccover(pstore1_merge, "STORE_MERGED", "D$ store merged")
// only merge stores to ECC granules that are already stored-to, to avoid
// WAW hazards
val wordMatch = (eccMask(pstore2_storegen_mask) | ~eccMask(pstore1_mask)).andR
val idxMatch = s2_vaddr(untagBits-1, log2Ceil(wordBytes)) === pstore2_addr(untagBits-1, log2Ceil(wordBytes))
val tagMatch = (s2_hit_way & pstore2_way).orR
pstore2_valid && wordMatch && idxMatch && tagMatch
})
dataArb.io.in(0).valid := should_pstore_drain(false.B)
dataArb.io.in(0).bits.write := pstore_drain
dataArb.io.in(0).bits.addr := Mux(pstore2_valid, pstore2_addr, pstore1_addr)
dataArb.io.in(0).bits.way_en := Mux(pstore2_valid, pstore2_way, pstore1_way)
dataArb.io.in(0).bits.wdata := encodeData(Fill(rowWords, Mux(pstore2_valid, pstore2_storegen_data, pstore1_data)), false.B)
dataArb.io.in(0).bits.wordMask := {
val eccMask = dataArb.io.in(0).bits.eccMask.asBools.grouped(subWordBytes/eccBytes).map(_.orR).toSeq.asUInt
val wordMask = UIntToOH(Mux(pstore2_valid, pstore2_addr, pstore1_addr).extract(rowOffBits-1, wordBytes.log2))
FillInterleaved(wordBytes/subWordBytes, wordMask) & Fill(rowBytes/wordBytes, eccMask)
}
dataArb.io.in(0).bits.eccMask := eccMask(Mux(pstore2_valid, pstore2_storegen_mask, pstore1_mask))
// store->load RAW hazard detection
def s1Depends(addr: UInt, mask: UInt) =
addr(idxMSB, wordOffBits) === s1_vaddr(idxMSB, wordOffBits) &&
Mux(s1_write, (eccByteMask(mask) & eccByteMask(s1_mask_xwr)).orR, (mask & s1_mask_xwr).orR)
val s1_hazard =
(pstore1_valid_likely && s1Depends(pstore1_addr, pstore1_mask)) ||
(pstore2_valid && s1Depends(pstore2_addr, pstore2_storegen_mask))
val s1_raw_hazard = s1_read && s1_hazard
s1_waw_hazard := (if (eccBytes == 1) false.B else {
ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard")
s1_write && (s1_hazard || needsRead(s1_req) && !s1_did_read)
})
when (s1_valid && s1_raw_hazard) { s1_nack := true.B }
// performance hints to processor
io.cpu.s2_nack_cause_raw := RegNext(s1_raw_hazard) || !(!s2_waw_hazard || s2_store_merge)
// Prepare a TileLink request message that initiates a transaction
val a_source = PriorityEncoder(~uncachedInFlight.asUInt << mmioOffset) // skip the MSHR
val acquire_address = (s2_req.addr >> idxLSB) << idxLSB
val access_address = s2_req.addr
val a_size = s2_req.size
val a_data = Fill(beatWords, pstore1_data)
val a_mask = pstore1_mask << (access_address.extract(beatBytes.log2-1, wordBytes.log2) << 3)
val get = edge.Get(a_source, access_address, a_size)._2
val put = edge.Put(a_source, access_address, a_size, a_data)._2
val putpartial = edge.Put(a_source, access_address, a_size, a_data, a_mask)._2
val atomics = if (edge.manager.anySupportLogical) {
MuxLookup(s2_req.cmd, WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle))))(Array(
M_XA_SWAP -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.SWAP)._2,
M_XA_XOR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.XOR) ._2,
M_XA_OR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.OR) ._2,
M_XA_AND -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.AND) ._2,
M_XA_ADD -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.ADD)._2,
M_XA_MIN -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MIN)._2,
M_XA_MAX -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAX)._2,
M_XA_MINU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MINU)._2,
M_XA_MAXU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAXU)._2))
} else {
// If no managers support atomics, assert fail if processor asks for them
assert (!(tl_out_a.valid && s2_read && s2_write && s2_uncached))
WireDefault(new TLBundleA(edge.bundle), DontCare)
}
tl_out_a.valid := !io.cpu.s2_kill &&
(s2_valid_uncached_pending ||
(s2_valid_cached_miss &&
!(release_ack_wait && (s2_req.addr ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U) &&
(cacheParams.acquireBeforeRelease.B && !release_ack_wait && release_queue_empty || !s2_victim_dirty)))
tl_out_a.bits := Mux(!s2_uncached, acquire(s2_vaddr, s2_req.addr, s2_grow_param),
Mux(!s2_write, get,
Mux(s2_req.cmd === M_PWR, putpartial,
Mux(!s2_read, put, atomics))))
// Drive APROT Bits
tl_out_a.bits.user.lift(AMBAProt).foreach { x =>
val user_bit_cacheable = s2_pma.cacheable
x.privileged := s2_req.dprv === PRV.M.U || user_bit_cacheable
// if the address is cacheable, enable outer caches
x.bufferable := user_bit_cacheable
x.modifiable := user_bit_cacheable
x.readalloc := user_bit_cacheable
x.writealloc := user_bit_cacheable
// Following are always tied off
x.fetch := false.B
x.secure := true.B
}
// Set pending bits for outstanding TileLink transaction
val a_sel = UIntToOH(a_source, maxUncachedInFlight+mmioOffset) >> mmioOffset
when (tl_out_a.fire) {
when (s2_uncached) {
(a_sel.asBools zip (uncachedInFlight zip uncachedReqs)) foreach { case (s, (f, r)) =>
when (s) {
f := true.B
r := s2_req
r.cmd := Mux(s2_write, Mux(s2_req.cmd === M_PWR, M_PWR, M_XWR), M_XRD)
}
}
}.otherwise {
cached_grant_wait := true.B
refill_way := s2_victim_or_hit_way
}
}
// grant
val (d_first, d_last, d_done, d_address_inc) = edge.addr_inc(tl_out.d)
val (d_opc, grantIsUncached, grantIsUncachedData) = {
val uncachedGrantOpcodesSansData = Seq(AccessAck, HintAck)
val uncachedGrantOpcodesWithData = Seq(AccessAckData)
val uncachedGrantOpcodes = uncachedGrantOpcodesWithData ++ uncachedGrantOpcodesSansData
val whole_opc = tl_out.d.bits.opcode
if (usingDataScratchpad) {
assert(!tl_out.d.valid || whole_opc.isOneOf(uncachedGrantOpcodes))
// the only valid TL-D messages are uncached, so we can do some pruning
val opc = whole_opc(uncachedGrantOpcodes.map(_.getWidth).max - 1, 0)
val data = DecodeLogic(opc, uncachedGrantOpcodesWithData, uncachedGrantOpcodesSansData)
(opc, true.B, data)
} else {
(whole_opc, whole_opc.isOneOf(uncachedGrantOpcodes), whole_opc.isOneOf(uncachedGrantOpcodesWithData))
}
}
tl_d_data_encoded := encodeData(tl_out.d.bits.data, tl_out.d.bits.corrupt && !io.ptw.customCSRs.suppressCorruptOnGrantData && !grantIsUncached)
val grantIsCached = d_opc.isOneOf(Grant, GrantData)
val grantIsVoluntary = d_opc === ReleaseAck // Clears a different pending bit
val grantIsRefill = d_opc === GrantData // Writes the data array
val grantInProgress = RegInit(false.B)
val blockProbeAfterGrantCount = RegInit(0.U)
when (blockProbeAfterGrantCount > 0.U) { blockProbeAfterGrantCount := blockProbeAfterGrantCount - 1.U }
val canAcceptCachedGrant = !release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release)
tl_out.d.ready := Mux(grantIsCached, (!d_first || tl_out.e.ready) && canAcceptCachedGrant, true.B)
val uncachedRespIdxOH = UIntToOH(tl_out.d.bits.source, maxUncachedInFlight+mmioOffset) >> mmioOffset
uncachedResp := Mux1H(uncachedRespIdxOH, uncachedReqs)
when (tl_out.d.fire) {
when (grantIsCached) {
grantInProgress := true.B
assert(cached_grant_wait, "A GrantData was unexpected by the dcache.")
when(d_last) {
cached_grant_wait := false.B
grantInProgress := false.B
blockProbeAfterGrantCount := (blockProbeAfterGrantCycles - 1).U
replacer.miss
}
} .elsewhen (grantIsUncached) {
(uncachedRespIdxOH.asBools zip uncachedInFlight) foreach { case (s, f) =>
when (s && d_last) {
assert(f, "An AccessAck was unexpected by the dcache.") // TODO must handle Ack coming back on same cycle!
f := false.B
}
}
when (grantIsUncachedData) {
if (!cacheParams.separateUncachedResp) {
if (!cacheParams.pipelineWayMux)
s1_data_way := 1.U << nWays
s2_req.cmd := M_XRD
s2_req.size := uncachedResp.size
s2_req.signed := uncachedResp.signed
s2_req.tag := uncachedResp.tag
s2_req.addr := {
require(rowOffBits >= beatOffBits)
val dontCareBits = s1_paddr >> rowOffBits << rowOffBits
dontCareBits | uncachedResp.addr(beatOffBits-1, 0)
}
s2_uncached_resp_addr := uncachedResp.addr
}
}
} .elsewhen (grantIsVoluntary) {
assert(release_ack_wait, "A ReleaseAck was unexpected by the dcache.") // TODO should handle Ack coming back on same cycle!
release_ack_wait := false.B
}
}
// Finish TileLink transaction by issuing a GrantAck
tl_out.e.valid := tl_out.d.valid && d_first && grantIsCached && canAcceptCachedGrant
tl_out.e.bits := edge.GrantAck(tl_out.d.bits)
assert(tl_out.e.fire === (tl_out.d.fire && d_first && grantIsCached))
// data refill
// note this ready-valid signaling ignores E-channel backpressure, which
// benignly means the data RAM might occasionally be redundantly written
dataArb.io.in(1).valid := tl_out.d.valid && grantIsRefill && canAcceptCachedGrant
when (grantIsRefill && !dataArb.io.in(1).ready) {
tl_out.e.valid := false.B
tl_out.d.ready := false.B
}
if (!usingDataScratchpad) {
dataArb.io.in(1).bits.write := true.B
dataArb.io.in(1).bits.addr := (s2_vaddr >> idxLSB) << idxLSB | d_address_inc
dataArb.io.in(1).bits.way_en := refill_way
dataArb.io.in(1).bits.wdata := tl_d_data_encoded
dataArb.io.in(1).bits.wordMask := ~0.U((rowBytes / subWordBytes).W)
dataArb.io.in(1).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
} else {
dataArb.io.in(1).bits := dataArb.io.in(0).bits
}
// tag updates on refill
// ignore backpressure from metaArb, which can only be caused by tag ECC
// errors on hit-under-miss. failing to write the new tag will leave the
// line invalid, so we'll simply request the line again later.
metaArb.io.in(3).valid := grantIsCached && d_done && !tl_out.d.bits.denied
metaArb.io.in(3).bits.write := true.B
metaArb.io.in(3).bits.way_en := refill_way
metaArb.io.in(3).bits.idx := s2_vaddr(idxMSB, idxLSB)
metaArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0))
metaArb.io.in(3).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_hit_state.onGrant(s2_req.cmd, tl_out.d.bits.param)).asUInt)
if (!cacheParams.separateUncachedResp) {
// don't accept uncached grants if there's a structural hazard on s2_data...
val blockUncachedGrant = Reg(Bool())
blockUncachedGrant := dataArb.io.out.valid
when (grantIsUncachedData && (blockUncachedGrant || s1_valid)) {
tl_out.d.ready := false.B
// ...but insert bubble to guarantee grant's eventual forward progress
when (tl_out.d.valid) {
io.cpu.req.ready := false.B
dataArb.io.in(1).valid := true.B
dataArb.io.in(1).bits.write := false.B
blockUncachedGrant := !dataArb.io.in(1).ready
}
}
}
ccover(tl_out.d.valid && !tl_out.d.ready, "BLOCK_D", "D$ D-channel blocked")
// Handle an incoming TileLink Probe message
val block_probe_for_core_progress = blockProbeAfterGrantCount > 0.U || lrscValid
val block_probe_for_pending_release_ack = release_ack_wait && (tl_out.b.bits.address ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U
val block_probe_for_ordering = releaseInFlight || block_probe_for_pending_release_ack || grantInProgress
metaArb.io.in(6).valid := tl_out.b.valid && (!block_probe_for_core_progress || lrscBackingOff)
tl_out.b.ready := metaArb.io.in(6).ready && !(block_probe_for_core_progress || block_probe_for_ordering || s1_valid || s2_valid)
metaArb.io.in(6).bits.write := false.B
metaArb.io.in(6).bits.idx := probeIdx(tl_out.b.bits)
metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, tl_out.b.bits.address)
metaArb.io.in(6).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(6).bits.data := metaArb.io.in(4).bits.data
// replacement policy
s1_victim_way := (if (replacer.perSet && nWays > 1) {
val repl_array = Mem(nSets, UInt(replacer.nBits.W))
val s1_repl_idx = s1_req.addr(idxBits+blockOffBits-1, blockOffBits)
val s2_repl_idx = s2_vaddr(idxBits+blockOffBits-1, blockOffBits)
val s2_repl_state = Reg(UInt(replacer.nBits.W))
val s2_new_repl_state = replacer.get_next_state(s2_repl_state, OHToUInt(s2_hit_way))
val s2_repl_wen = s2_valid_masked && s2_hit_way.orR && s2_repl_state =/= s2_new_repl_state
val s1_repl_state = Mux(s2_repl_wen && s2_repl_idx === s1_repl_idx, s2_new_repl_state, repl_array(s1_repl_idx))
when (s1_valid_not_nacked) { s2_repl_state := s1_repl_state }
val waddr = Mux(resetting, flushCounter(idxBits-1, 0), s2_repl_idx)
val wdata = Mux(resetting, 0.U, s2_new_repl_state)
val wen = resetting || s2_repl_wen
when (wen) { repl_array(waddr) := wdata }
replacer.get_replace_way(s1_repl_state)
} else {
replacer.way
})
// release
val (c_first, c_last, releaseDone, c_count) = edge.count(tl_out_c)
val releaseRejected = Wire(Bool())
val s1_release_data_valid = RegNext(dataArb.io.in(2).fire)
val s2_release_data_valid = RegNext(s1_release_data_valid && !releaseRejected)
releaseRejected := s2_release_data_valid && !tl_out_c.fire
val releaseDataBeat = Cat(0.U, c_count) + Mux(releaseRejected, 0.U, s1_release_data_valid + Cat(0.U, s2_release_data_valid))
val nackResponseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = TLPermissions.NtoN)
val cleanReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param)
val dirtyReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param, data = 0.U)
tl_out_c.valid := (s2_release_data_valid || (!cacheParams.silentDrop.B && release_state === s_voluntary_release)) && !(c_first && release_ack_wait)
tl_out_c.bits := nackResponseMessage
val newCoh = WireDefault(probeNewCoh)
releaseWay := s2_probe_way
if (!usingDataScratchpad) {
when (s2_victimize) {
assert(s2_valid_flush_line || s2_flush_valid || io.cpu.s2_nack)
val discard_line = s2_valid_flush_line && s2_req.size(1) || s2_flush_valid && flushing_req.size(1)
release_state := Mux(s2_victim_dirty && !discard_line, s_voluntary_writeback,
Mux(!cacheParams.silentDrop.B && !release_ack_wait && release_queue_empty && s2_victim_state.isValid() && (s2_valid_flush_line || s2_flush_valid || s2_readwrite && !s2_hit_valid), s_voluntary_release,
s_voluntary_write_meta))
probe_bits := addressToProbe(s2_vaddr, Cat(s2_victim_tag, s2_req.addr(tagLSB-1, idxLSB)) << idxLSB)
}
when (s2_probe) {
val probeNack = WireDefault(true.B)
when (s2_meta_error) {
release_state := s_probe_retry
}.elsewhen (s2_prb_ack_data) {
release_state := s_probe_rep_dirty
}.elsewhen (s2_probe_state.isValid()) {
tl_out_c.valid := true.B
tl_out_c.bits := cleanReleaseMessage
release_state := Mux(releaseDone, s_probe_write_meta, s_probe_rep_clean)
}.otherwise {
tl_out_c.valid := true.B
probeNack := !releaseDone
release_state := Mux(releaseDone, s_ready, s_probe_rep_miss)
}
when (probeNack) { s1_nack := true.B }
}
when (release_state === s_probe_retry) {
metaArb.io.in(6).valid := true.B
metaArb.io.in(6).bits.idx := probeIdx(probe_bits)
metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, probe_bits.address)
when (metaArb.io.in(6).ready) {
release_state := s_ready
s1_probe := true.B
}
}
when (release_state === s_probe_rep_miss) {
tl_out_c.valid := true.B
when (releaseDone) { release_state := s_ready }
}
when (release_state === s_probe_rep_clean) {
tl_out_c.valid := true.B
tl_out_c.bits := cleanReleaseMessage
when (releaseDone) { release_state := s_probe_write_meta }
}
when (release_state === s_probe_rep_dirty) {
tl_out_c.bits := dirtyReleaseMessage
when (releaseDone) { release_state := s_probe_write_meta }
}
when (release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release)) {
when (release_state === s_voluntary_release) {
tl_out_c.bits := edge.Release(fromSource = 0.U,
toAddress = 0.U,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = s2_shrink_param)._2
}.otherwise {
tl_out_c.bits := edge.Release(fromSource = 0.U,
toAddress = 0.U,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = s2_shrink_param,
data = 0.U)._2
}
newCoh := voluntaryNewCoh
releaseWay := s2_victim_or_hit_way
when (releaseDone) { release_state := s_voluntary_write_meta }
when (tl_out_c.fire && c_first) {
release_ack_wait := true.B
release_ack_addr := probe_bits.address
}
}
tl_out_c.bits.source := probe_bits.source
tl_out_c.bits.address := probe_bits.address
tl_out_c.bits.data := s2_data_corrected
tl_out_c.bits.corrupt := inWriteback && s2_data_error_uncorrectable
}
tl_out_c.bits.user.lift(AMBAProt).foreach { x =>
x.fetch := false.B
x.secure := true.B
x.privileged := true.B
x.bufferable := true.B
x.modifiable := true.B
x.readalloc := true.B
x.writealloc := true.B
}
dataArb.io.in(2).valid := inWriteback && releaseDataBeat < refillCycles.U
dataArb.io.in(2).bits := dataArb.io.in(1).bits
dataArb.io.in(2).bits.write := false.B
dataArb.io.in(2).bits.addr := (probeIdx(probe_bits) << blockOffBits) | (releaseDataBeat(log2Up(refillCycles)-1,0) << rowOffBits)
dataArb.io.in(2).bits.wordMask := ~0.U((rowBytes / subWordBytes).W)
dataArb.io.in(2).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
dataArb.io.in(2).bits.way_en := ~0.U(nWays.W)
metaArb.io.in(4).valid := release_state.isOneOf(s_voluntary_write_meta, s_probe_write_meta)
metaArb.io.in(4).bits.write := true.B
metaArb.io.in(4).bits.way_en := releaseWay
metaArb.io.in(4).bits.idx := probeIdx(probe_bits)
metaArb.io.in(4).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, probe_bits.address(idxMSB, 0))
metaArb.io.in(4).bits.data := tECC.encode(L1Metadata(tl_out_c.bits.address >> tagLSB, newCoh).asUInt)
when (metaArb.io.in(4).fire) { release_state := s_ready }
// cached response
(io.cpu.resp.bits: Data).waiveAll :<>= (s2_req: Data).waiveAll
io.cpu.resp.bits.has_data := s2_read
io.cpu.resp.bits.replay := false.B
io.cpu.s2_uncached := s2_uncached && !s2_hit
io.cpu.s2_paddr := s2_req.addr
io.cpu.s2_gpa := s2_tlb_xcpt.gpa
io.cpu.s2_gpa_is_pte := s2_tlb_xcpt.gpa_is_pte
// report whether there are any outstanding accesses. disregard any
// slave-port accesses, since they don't affect local memory ordering.
val s1_isSlavePortAccess = s1_req.no_xcpt
val s2_isSlavePortAccess = s2_req.no_xcpt
io.cpu.ordered := !(s1_valid && !s1_isSlavePortAccess || s2_valid && !s2_isSlavePortAccess || cached_grant_wait || uncachedInFlight.asUInt.orR)
io.cpu.store_pending := (cached_grant_wait && isWrite(s2_req.cmd)) || uncachedInFlight.asUInt.orR
val s1_xcpt_valid = tlb.io.req.valid && !s1_isSlavePortAccess && !s1_nack
io.cpu.s2_xcpt := Mux(RegNext(s1_xcpt_valid), s2_tlb_xcpt, 0.U.asTypeOf(s2_tlb_xcpt))
if (usingDataScratchpad) {
assert(!(s2_valid_masked && s2_req.cmd.isOneOf(M_XLR, M_XSC)))
} else {
ccover(tl_out.b.valid && !tl_out.b.ready, "BLOCK_B", "D$ B-channel blocked")
}
// uncached response
val s1_uncached_data_word = {
val word_idx = uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes))
val words = tl_out.d.bits.data.grouped(wordBits)
words(word_idx)
}
val s2_uncached_data_word = RegEnable(s1_uncached_data_word, io.cpu.replay_next)
val doUncachedResp = RegNext(io.cpu.replay_next)
io.cpu.resp.valid := (s2_valid_hit_pre_data_ecc || doUncachedResp) && !s2_data_error
io.cpu.replay_next := tl_out.d.fire && grantIsUncachedData && !cacheParams.separateUncachedResp.B
when (doUncachedResp) {
assert(!s2_valid_hit)
io.cpu.resp.bits.replay := true.B
io.cpu.resp.bits.addr := s2_uncached_resp_addr
}
io.cpu.uncached_resp.map { resp =>
resp.valid := tl_out.d.valid && grantIsUncachedData
resp.bits.tag := uncachedResp.tag
resp.bits.size := uncachedResp.size
resp.bits.signed := uncachedResp.signed
resp.bits.data := new LoadGen(uncachedResp.size, uncachedResp.signed, uncachedResp.addr, s1_uncached_data_word, false.B, wordBytes).data
resp.bits.data_raw := s1_uncached_data_word
when (grantIsUncachedData && !resp.ready) {
tl_out.d.ready := false.B
}
}
// load data subword mux/sign extension
val s2_data_word = (0 until rowBits by wordBits).map(i => s2_data_uncorrected(wordBits+i-1,i)).reduce(_|_)
val s2_data_word_corrected = (0 until rowBits by wordBits).map(i => s2_data_corrected(wordBits+i-1,i)).reduce(_|_)
val s2_data_word_possibly_uncached = Mux(cacheParams.pipelineWayMux.B && doUncachedResp, s2_uncached_data_word, 0.U) | s2_data_word
val loadgen = new LoadGen(s2_req.size, s2_req.signed, s2_req.addr, s2_data_word_possibly_uncached, s2_sc, wordBytes)
io.cpu.resp.bits.data := loadgen.data | s2_sc_fail
io.cpu.resp.bits.data_word_bypass := loadgen.wordData
io.cpu.resp.bits.data_raw := s2_data_word
io.cpu.resp.bits.store_data := pstore1_data
// AMOs
if (usingRMW) {
val amoalus = (0 until coreDataBits / xLen).map { i =>
val amoalu = Module(new AMOALU(xLen))
amoalu.io.mask := pstore1_mask >> (i * xBytes)
amoalu.io.cmd := (if (usingAtomicsInCache) pstore1_cmd else M_XWR)
amoalu.io.lhs := s2_data_word >> (i * xLen)
amoalu.io.rhs := pstore1_data >> (i * xLen)
amoalu
}
pstore1_storegen_data := (if (!usingDataScratchpad) amoalus.map(_.io.out).asUInt else {
val mask = FillInterleaved(8, Mux(s2_correct, 0.U, pstore1_mask))
amoalus.map(_.io.out_unmasked).asUInt & mask | s2_data_word_corrected & ~mask
})
} else if (!usingAtomics) {
assert(!(s1_valid_masked && s1_read && s1_write), "unsupported D$ operation")
}
if (coreParams.useVector) {
edge.manager.managers.foreach { m =>
// Statically ensure that no-allocate accesses are permitted.
// We could consider turning some of these into dynamic PMA checks.
require(!m.supportsAcquireB || m.supportsGet, "With a vector unit, cacheable memory must support Get")
require(!m.supportsAcquireT || m.supportsPutPartial, "With a vector unit, cacheable memory must support PutPartial")
}
}
// flushes
if (!usingDataScratchpad)
when (RegNext(reset.asBool)) { resetting := true.B }
val flushCounterNext = flushCounter +& 1.U
val flushDone = (flushCounterNext >> log2Ceil(nSets)) === nWays.U
val flushCounterWrap = flushCounterNext(log2Ceil(nSets)-1, 0)
ccover(s2_valid_masked && s2_cmd_flush_all && s2_meta_error, "TAG_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in tag array during cache flush")
ccover(s2_valid_masked && s2_cmd_flush_all && s2_data_error, "DATA_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in data array during cache flush")
s1_flush_valid := metaArb.io.in(5).fire && !s1_flush_valid && !s2_flush_valid_pre_tag_ecc && release_state === s_ready && !release_ack_wait
metaArb.io.in(5).valid := flushing && !flushed
metaArb.io.in(5).bits.write := false.B
metaArb.io.in(5).bits.idx := flushCounter(idxBits-1, 0)
metaArb.io.in(5).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(5).bits.idx << blockOffBits)
metaArb.io.in(5).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(5).bits.data := metaArb.io.in(4).bits.data
// Only flush D$ on FENCE.I if some cached executable regions are untracked.
if (supports_flush) {
when (s2_valid_masked && s2_cmd_flush_all) {
when (!flushed && !io.cpu.s2_kill && !release_ack_wait && !uncachedInFlight.asUInt.orR) {
flushing := true.B
flushing_req := s2_req
}
}
when (tl_out_a.fire && !s2_uncached) { flushed := false.B }
when (flushing) {
s1_victim_way := flushCounter >> log2Up(nSets)
when (s2_flush_valid) {
flushCounter := flushCounterNext
when (flushDone) {
flushed := true.B
if (!isPow2(nWays)) flushCounter := flushCounterWrap
}
}
when (flushed && release_state === s_ready && !release_ack_wait) {
flushing := false.B
}
}
}
metaArb.io.in(0).valid := resetting
metaArb.io.in(0).bits := metaArb.io.in(5).bits
metaArb.io.in(0).bits.write := true.B
metaArb.io.in(0).bits.way_en := ~0.U(nWays.W)
metaArb.io.in(0).bits.data := tECC.encode(L1Metadata(0.U, ClientMetadata.onReset).asUInt)
when (resetting) {
flushCounter := flushCounterNext
when (flushDone) {
resetting := false.B
if (!isPow2(nWays)) flushCounter := flushCounterWrap
}
}
// gate the clock
clock_en_reg := !cacheParams.clockGate.B ||
io.ptw.customCSRs.disableDCacheClockGate ||
io.cpu.keep_clock_enabled ||
metaArb.io.out.valid || // subsumes resetting || flushing
s1_probe || s2_probe ||
s1_valid || s2_valid ||
io.tlb_port.req.valid ||
s1_tlb_req_valid || s2_tlb_req_valid ||
pstore1_held || pstore2_valid ||
release_state =/= s_ready ||
release_ack_wait || !release_queue_empty ||
!tlb.io.req.ready ||
cached_grant_wait || uncachedInFlight.asUInt.orR ||
lrscCount > 0.U || blockProbeAfterGrantCount > 0.U
// performance events
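    // Summary signals exported to the core: acquire/release/grant activity, TLB misses,
    // store-buffer occupancy hints, and refill-blocking status.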
io.cpu.perf.acquire := edge.done(tl_out_a)
io.cpu.perf.release := edge.done(tl_out_c)
io.cpu.perf.grant := tl_out.d.valid && d_last
io.cpu.perf.tlbMiss := io.ptw.req.fire
io.cpu.perf.storeBufferEmptyAfterLoad := !(
(s1_valid && s1_write) ||
((s2_valid && s2_write && !s2_waw_hazard) || pstore1_held) ||
pstore2_valid)
io.cpu.perf.storeBufferEmptyAfterStore := !(
(s1_valid && s1_write) ||
(s2_valid && s2_write && pstore1_rmw) ||
((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) && pstore2_valid))
io.cpu.perf.canAcceptStoreThenLoad := !(
((s2_valid && s2_write && pstore1_rmw) && (s1_valid && s1_write && !s1_waw_hazard)) ||
(pstore2_valid && pstore1_valid_likely && (s1_valid && s1_write)))
io.cpu.perf.canAcceptStoreThenRMW := io.cpu.perf.canAcceptStoreThenLoad && !pstore2_valid
io.cpu.perf.canAcceptLoadThenLoad := !((s1_valid && s1_write && needsRead(s1_req)) && ((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) || pstore2_valid))
io.cpu.perf.blocked := {
// stop reporting blocked just before unblocking to avoid overly conservative stalling
val beatsBeforeEnd = outer.crossing match {
case SynchronousCrossing(_) => 2
case RationalCrossing(_) => 1 // assumes 1 < ratio <= 2; need more bookkeeping for optimal handling of >2
case _: AsynchronousCrossing => 1 // likewise
case _: CreditedCrossing => 1 // likewise
}
val near_end_of_refill = if (cacheBlockBytes / beatBytes <= beatsBeforeEnd) tl_out.d.valid else {
val refill_count = RegInit(0.U((cacheBlockBytes / beatBytes).log2.W))
when (tl_out.d.fire && grantIsRefill) { refill_count := refill_count + 1.U }
refill_count >= (cacheBlockBytes / beatBytes - beatsBeforeEnd).U
}
cached_grant_wait && !near_end_of_refill
}
// report errors
val (data_error, data_error_uncorrectable, data_error_addr) =
if (usingDataScratchpad) (s2_valid_data_error, s2_data_error_uncorrectable, s2_req.addr) else {
(RegNext(tl_out_c.fire && inWriteback && s2_data_error),
RegNext(s2_data_error_uncorrectable),
       probe_bits.address) // This value is stable for a cycle after tl_out_c.fire, so no register is needed
}
{
val error_addr =
Mux(metaArb.io.in(1).valid, Cat(s2_first_meta_corrected.tag, metaArb.io.in(1).bits.addr(tagLSB-1, idxLSB)),
data_error_addr >> idxLSB) << idxLSB
io.errors.uncorrectable.foreach { u =>
u.valid := metaArb.io.in(1).valid && s2_meta_error_uncorrectable || data_error && data_error_uncorrectable
u.bits := error_addr
}
io.errors.correctable.foreach { c =>
c.valid := metaArb.io.in(1).valid || data_error
c.bits := error_addr
io.errors.uncorrectable.foreach { u => when (u.valid) { c.valid := false.B } }
}
io.errors.bus.valid := tl_out.d.fire && (tl_out.d.bits.denied || tl_out.d.bits.corrupt)
io.errors.bus.bits := Mux(grantIsCached, s2_req.addr >> idxLSB << idxLSB, 0.U)
ccoverNotScratchpad(io.errors.bus.valid && grantIsCached, "D_ERROR_CACHED", "D$ D-channel error, cached")
ccover(io.errors.bus.valid && !grantIsCached, "D_ERROR_UNCACHED", "D$ D-channel error, uncached")
}
if (usingDataScratchpad) {
val data_error_cover = Seq(
property.CoverBoolean(!data_error, Seq("no_data_error")),
property.CoverBoolean(data_error && !data_error_uncorrectable, Seq("data_correctable_error")),
property.CoverBoolean(data_error && data_error_uncorrectable, Seq("data_uncorrectable_error")))
val request_source = Seq(
property.CoverBoolean(s2_isSlavePortAccess, Seq("from_TL")),
property.CoverBoolean(!s2_isSlavePortAccess, Seq("from_CPU")))
property.cover(new property.CrossProperty(
Seq(data_error_cover, request_source),
Seq(),
"MemorySystem;;Scratchpad Memory Bit Flip Cross Covers"))
} else {
val data_error_type = Seq(
property.CoverBoolean(!s2_valid_data_error, Seq("no_data_error")),
property.CoverBoolean(s2_valid_data_error && !s2_data_error_uncorrectable, Seq("data_correctable_error")),
property.CoverBoolean(s2_valid_data_error && s2_data_error_uncorrectable, Seq("data_uncorrectable_error")))
val data_error_dirty = Seq(
property.CoverBoolean(!s2_victim_dirty, Seq("data_clean")),
property.CoverBoolean(s2_victim_dirty, Seq("data_dirty")))
val request_source = if (supports_flush) {
Seq(
property.CoverBoolean(!flushing, Seq("access")),
property.CoverBoolean(flushing, Seq("during_flush")))
} else {
Seq(property.CoverBoolean(true.B, Seq("never_flush")))
}
val tag_error_cover = Seq(
property.CoverBoolean( !s2_meta_error, Seq("no_tag_error")),
property.CoverBoolean( s2_meta_error && !s2_meta_error_uncorrectable, Seq("tag_correctable_error")),
property.CoverBoolean( s2_meta_error && s2_meta_error_uncorrectable, Seq("tag_uncorrectable_error")))
property.cover(new property.CrossProperty(
Seq(data_error_type, data_error_dirty, request_source, tag_error_cover),
Seq(),
"MemorySystem;;Cache Memory Bit Flip Cross Covers"))
}
} // leaving gated-clock domain
val dcacheImpl = withClock (gated_clock) { new DCacheModuleImpl }
def encodeData(x: UInt, poison: Bool) = x.grouped(eccBits).map(dECC.encode(_, if (dECC.canDetect) poison else false.B)).asUInt
def dummyEncodeData(x: UInt) = x.grouped(eccBits).map(dECC.swizzle(_)).asUInt
def decodeData(x: UInt) = x.grouped(dECC.width(eccBits)).map(dECC.decode(_))
def eccMask(byteMask: UInt) = byteMask.grouped(eccBytes).map(_.orR).asUInt
def eccByteMask(byteMask: UInt) = FillInterleaved(eccBytes, eccMask(byteMask))
def likelyNeedsRead(req: HellaCacheReq) = {
val res = !req.cmd.isOneOf(M_XWR, M_PFW) || req.size < log2Ceil(eccBytes).U
assert(!needsRead(req) || res)
res
}
def needsRead(req: HellaCacheReq) =
isRead(req.cmd) ||
(isWrite(req.cmd) && (req.cmd === M_PWR || req.size < log2Ceil(eccBytes).U))
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"DCACHE_$label", "MemorySystem;;" + desc)
def ccoverNotScratchpad(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
if (!usingDataScratchpad) ccover(cond, label, desc)
require(!usingVM || tagLSB <= pgIdxBits, s"D$$ set size must not exceed ${1<<(pgIdxBits-10)} KiB; got ${(nSets * cacheBlockBytes)>>10} KiB")
def tagLSB: Int = untagBits
def probeIdx(b: TLBundleB): UInt = b.address(idxMSB, idxLSB)
def addressToProbe(vaddr: UInt, paddr: UInt): TLBundleB = {
val res = Wire(new TLBundleB(edge.bundle))
res :#= DontCare
res.address := paddr
res.source := (mmioOffset - 1).U
res
}
def acquire(vaddr: UInt, paddr: UInt, param: UInt): TLBundleA = {
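    // If no manager supports AcquireB, produce a tied-off A-channel bundle; the cache never
    // issues acquires in that configuration.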
if (!edge.manager.anySupportAcquireB) WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle)))
else edge.AcquireBlock(0.U, paddr >> lgCacheBlockBytes << lgCacheBlockBytes, lgCacheBlockBytes.U, param)._2
}
}
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
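// Usage note (illustrative sketch, not part of the files above): DescribedSRAM wraps a
// SyncReadMem and records its geometry via Annotated.srams for downstream memory tooling.
// A hypothetical instantiation for an 8-way array of 64-bit lanes might look like:
//   val dataArray = DescribedSRAM(
//     name = "data_arrays_0",
//     desc = "DCache Data Array",
//     size = 256,                    // depth in rows (assumed)
//     data = Vec(8, UInt(64.W))      // one lane per way (assumed)
//   )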
| module DCacheDataArray( // @[DCache.scala:49:7]
input clock, // @[DCache.scala:49:7]
input reset, // @[DCache.scala:49:7]
input io_req_valid, // @[DCache.scala:50:14]
input [11:0] io_req_bits_addr, // @[DCache.scala:50:14]
input io_req_bits_write, // @[DCache.scala:50:14]
input [127:0] io_req_bits_wdata, // @[DCache.scala:50:14]
input [1:0] io_req_bits_wordMask, // @[DCache.scala:50:14]
input [7:0] io_req_bits_eccMask, // @[DCache.scala:50:14]
input [7:0] io_req_bits_way_en, // @[DCache.scala:50:14]
output [127:0] io_resp_0, // @[DCache.scala:50:14]
output [127:0] io_resp_1, // @[DCache.scala:50:14]
output [127:0] io_resp_2, // @[DCache.scala:50:14]
output [127:0] io_resp_3, // @[DCache.scala:50:14]
output [127:0] io_resp_4, // @[DCache.scala:50:14]
output [127:0] io_resp_5, // @[DCache.scala:50:14]
output [127:0] io_resp_6, // @[DCache.scala:50:14]
output [127:0] io_resp_7 // @[DCache.scala:50:14]
);
wire [511:0] _rockettile_dcache_data_arrays_1_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire [511:0] _rockettile_dcache_data_arrays_0_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire io_req_valid_0 = io_req_valid; // @[DCache.scala:49:7]
wire [11:0] io_req_bits_addr_0 = io_req_bits_addr; // @[DCache.scala:49:7]
wire io_req_bits_write_0 = io_req_bits_write; // @[DCache.scala:49:7]
wire [127:0] io_req_bits_wdata_0 = io_req_bits_wdata; // @[DCache.scala:49:7]
wire [1:0] io_req_bits_wordMask_0 = io_req_bits_wordMask; // @[DCache.scala:49:7]
wire [7:0] io_req_bits_eccMask_0 = io_req_bits_eccMask; // @[DCache.scala:49:7]
wire [7:0] io_req_bits_way_en_0 = io_req_bits_way_en; // @[DCache.scala:49:7]
wire [127:0] _io_resp_0_T; // @[package.scala:45:27]
wire [127:0] _io_resp_1_T; // @[package.scala:45:27]
wire [127:0] _io_resp_2_T; // @[package.scala:45:27]
wire [127:0] _io_resp_3_T; // @[package.scala:45:27]
wire [127:0] _io_resp_4_T; // @[package.scala:45:27]
wire [127:0] _io_resp_5_T; // @[package.scala:45:27]
wire [127:0] _io_resp_6_T; // @[package.scala:45:27]
wire [127:0] _io_resp_7_T; // @[package.scala:45:27]
wire [127:0] io_resp_0_0; // @[DCache.scala:49:7]
wire [127:0] io_resp_1_0; // @[DCache.scala:49:7]
wire [127:0] io_resp_2_0; // @[DCache.scala:49:7]
wire [127:0] io_resp_3_0; // @[DCache.scala:49:7]
wire [127:0] io_resp_4_0; // @[DCache.scala:49:7]
wire [127:0] io_resp_5_0; // @[DCache.scala:49:7]
wire [127:0] io_resp_6_0; // @[DCache.scala:49:7]
wire [127:0] io_resp_7_0; // @[DCache.scala:49:7]
wire eccMask_0 = io_req_bits_eccMask_0[0]; // @[DCache.scala:49:7, :56:82]
wire eccMask_1 = io_req_bits_eccMask_0[1]; // @[DCache.scala:49:7, :56:82]
wire eccMask_2 = io_req_bits_eccMask_0[2]; // @[DCache.scala:49:7, :56:82]
wire eccMask_3 = io_req_bits_eccMask_0[3]; // @[DCache.scala:49:7, :56:82]
wire eccMask_4 = io_req_bits_eccMask_0[4]; // @[DCache.scala:49:7, :56:82]
wire eccMask_5 = io_req_bits_eccMask_0[5]; // @[DCache.scala:49:7, :56:82]
wire eccMask_6 = io_req_bits_eccMask_0[6]; // @[DCache.scala:49:7, :56:82]
wire eccMask_7 = io_req_bits_eccMask_0[7]; // @[DCache.scala:49:7, :56:82]
wire _wMask_T = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_1 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_2 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_3 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_4 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_5 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_6 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_7 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108]
wire wMask_0 = eccMask_0 & _wMask_T; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_1 = eccMask_1 & _wMask_T_1; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_2 = eccMask_2 & _wMask_T_2; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_3 = eccMask_3 & _wMask_T_3; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_4 = eccMask_4 & _wMask_T_4; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_5 = eccMask_5 & _wMask_T_5; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_6 = eccMask_6 & _wMask_T_6; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_7 = eccMask_7 & _wMask_T_7; // @[DCache.scala:56:82, :57:{87,108}]
wire _wMask_T_8 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_9 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_10 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_11 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_12 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_13 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_14 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_15 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108]
wire wMask_8 = eccMask_0 & _wMask_T_8; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_9 = eccMask_1 & _wMask_T_9; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_10 = eccMask_2 & _wMask_T_10; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_11 = eccMask_3 & _wMask_T_11; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_12 = eccMask_4 & _wMask_T_12; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_13 = eccMask_5 & _wMask_T_13; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_14 = eccMask_6 & _wMask_T_14; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_15 = eccMask_7 & _wMask_T_15; // @[DCache.scala:56:82, :57:{87,108}]
wire _wMask_T_16 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_17 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_18 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_19 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_20 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_21 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_22 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_23 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108]
wire wMask_16 = eccMask_0 & _wMask_T_16; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_17 = eccMask_1 & _wMask_T_17; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_18 = eccMask_2 & _wMask_T_18; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_19 = eccMask_3 & _wMask_T_19; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_20 = eccMask_4 & _wMask_T_20; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_21 = eccMask_5 & _wMask_T_21; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_22 = eccMask_6 & _wMask_T_22; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_23 = eccMask_7 & _wMask_T_23; // @[DCache.scala:56:82, :57:{87,108}]
wire _wMask_T_24 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_25 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_26 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_27 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_28 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_29 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_30 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_31 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108]
wire wMask_24 = eccMask_0 & _wMask_T_24; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_25 = eccMask_1 & _wMask_T_25; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_26 = eccMask_2 & _wMask_T_26; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_27 = eccMask_3 & _wMask_T_27; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_28 = eccMask_4 & _wMask_T_28; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_29 = eccMask_5 & _wMask_T_29; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_30 = eccMask_6 & _wMask_T_30; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_31 = eccMask_7 & _wMask_T_31; // @[DCache.scala:56:82, :57:{87,108}]
wire _wMask_T_32 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_33 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_34 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_35 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_36 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_37 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_38 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_39 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108]
wire wMask_32 = eccMask_0 & _wMask_T_32; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_33 = eccMask_1 & _wMask_T_33; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_34 = eccMask_2 & _wMask_T_34; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_35 = eccMask_3 & _wMask_T_35; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_36 = eccMask_4 & _wMask_T_36; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_37 = eccMask_5 & _wMask_T_37; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_38 = eccMask_6 & _wMask_T_38; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_39 = eccMask_7 & _wMask_T_39; // @[DCache.scala:56:82, :57:{87,108}]
wire _wMask_T_40 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_41 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_42 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_43 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_44 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_45 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_46 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_47 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108]
wire wMask_40 = eccMask_0 & _wMask_T_40; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_41 = eccMask_1 & _wMask_T_41; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_42 = eccMask_2 & _wMask_T_42; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_43 = eccMask_3 & _wMask_T_43; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_44 = eccMask_4 & _wMask_T_44; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_45 = eccMask_5 & _wMask_T_45; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_46 = eccMask_6 & _wMask_T_46; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_47 = eccMask_7 & _wMask_T_47; // @[DCache.scala:56:82, :57:{87,108}]
wire _wMask_T_48 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_49 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_50 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_51 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_52 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_53 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_54 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_55 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108]
wire wMask_48 = eccMask_0 & _wMask_T_48; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_49 = eccMask_1 & _wMask_T_49; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_50 = eccMask_2 & _wMask_T_50; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_51 = eccMask_3 & _wMask_T_51; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_52 = eccMask_4 & _wMask_T_52; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_53 = eccMask_5 & _wMask_T_53; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_54 = eccMask_6 & _wMask_T_54; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_55 = eccMask_7 & _wMask_T_55; // @[DCache.scala:56:82, :57:{87,108}]
wire _wMask_T_56 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_57 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_58 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_59 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_60 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_61 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_62 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108]
wire _wMask_T_63 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108]
wire wMask_56 = eccMask_0 & _wMask_T_56; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_57 = eccMask_1 & _wMask_T_57; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_58 = eccMask_2 & _wMask_T_58; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_59 = eccMask_3 & _wMask_T_59; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_60 = eccMask_4 & _wMask_T_60; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_61 = eccMask_5 & _wMask_T_61; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_62 = eccMask_6 & _wMask_T_62; // @[DCache.scala:56:82, :57:{87,108}]
wire wMask_63 = eccMask_7 & _wMask_T_63; // @[DCache.scala:56:82, :57:{87,108}]
wire [63:0] wWords_0 = io_req_bits_wdata_0[63:0]; // @[package.scala:211:50]
wire [63:0] wWords_1 = io_req_bits_wdata_0[127:64]; // @[package.scala:211:50]
wire [7:0] addr = io_req_bits_addr_0[11:4]; // @[DCache.scala:49:7, :59:31]
wire [7:0] _rdata_data_WIRE = addr; // @[DCache.scala:59:31, :77:26]
wire [7:0] _rdata_data_WIRE_1 = addr; // @[DCache.scala:59:31, :77:26]
wire _rdata_T; // @[DCache.scala:72:17]
wire _rdata_data_T_1; // @[DCache.scala:77:39]
wire [7:0] _rdata_WIRE_0; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_2; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_3; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_4; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_5; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_6; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_7; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_8; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_9; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_10; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_11; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_12; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_13; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_14; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_15; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_16; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_17; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_18; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_19; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_20; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_21; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_22; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_23; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_24; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_25; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_26; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_27; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_28; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_29; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_30; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_31; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_32; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_33; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_34; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_35; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_36; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_37; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_38; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_39; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_40; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_41; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_42; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_43; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_44; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_45; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_46; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_47; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_48; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_49; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_50; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_51; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_52; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_53; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_54; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_55; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_56; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_57; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_58; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_59; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_60; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_61; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_62; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_63; // @[DCache.scala:75:32]
wire [63:0] _GEN = {wMask_63, wMask_62, wMask_61, wMask_60, wMask_59, wMask_58, wMask_57, wMask_56, wMask_55, wMask_54, wMask_53, wMask_52, wMask_51, wMask_50, wMask_49, wMask_48, wMask_47, wMask_46, wMask_45, wMask_44, wMask_43, wMask_42, wMask_41, wMask_40, wMask_39, wMask_38, wMask_37, wMask_36, wMask_35, wMask_34, wMask_33, wMask_32, wMask_31, wMask_30, wMask_29, wMask_28, wMask_27, wMask_26, wMask_25, wMask_24, wMask_23, wMask_22, wMask_21, wMask_20, wMask_19, wMask_18, wMask_17, wMask_16, wMask_15, wMask_14, wMask_13, wMask_12, wMask_11, wMask_10, wMask_9, wMask_8, wMask_7, wMask_6, wMask_5, wMask_4, wMask_3, wMask_2, wMask_1, wMask_0}; // @[DescribedSRAM.scala:17:26]
wire _rdata_T_1; // @[DCache.scala:72:17]
wire _rdata_data_T_3; // @[DCache.scala:77:39]
wire [7:0] _rdata_WIRE_1_0; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_1; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_2; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_3; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_4; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_5; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_6; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_7; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_8; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_9; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_10; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_11; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_12; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_13; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_14; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_15; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_16; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_17; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_18; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_19; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_20; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_21; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_22; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_23; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_24; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_25; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_26; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_27; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_28; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_29; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_30; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_31; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_32; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_33; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_34; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_35; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_36; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_37; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_38; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_39; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_40; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_41; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_42; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_43; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_44; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_45; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_46; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_47; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_48; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_49; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_50; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_51; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_52; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_53; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_54; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_55; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_56; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_57; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_58; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_59; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_60; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_61; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_62; // @[DCache.scala:75:32]
wire [7:0] _rdata_WIRE_1_63; // @[DCache.scala:75:32]
wire _rdata_valid_T = io_req_bits_wordMask_0[0]; // @[DCache.scala:49:7, :71:83]
wire _rdata_valid_T_1 = _rdata_valid_T; // @[DCache.scala:71:{60,83}]
wire rdata_valid = io_req_valid_0 & _rdata_valid_T_1; // @[DCache.scala:49:7, :71:{30,60}]
assign _rdata_T = rdata_valid & io_req_bits_write_0; // @[DCache.scala:49:7, :71:30, :72:17]
wire [7:0] rdata_wData_0 = wWords_0[7:0]; // @[package.scala:211:50]
assign _rdata_WIRE_0 = rdata_wData_0; // @[package.scala:211:50]
assign _rdata_WIRE_8 = rdata_wData_0; // @[package.scala:211:50]
assign _rdata_WIRE_16 = rdata_wData_0; // @[package.scala:211:50]
assign _rdata_WIRE_24 = rdata_wData_0; // @[package.scala:211:50]
assign _rdata_WIRE_32 = rdata_wData_0; // @[package.scala:211:50]
assign _rdata_WIRE_40 = rdata_wData_0; // @[package.scala:211:50]
assign _rdata_WIRE_48 = rdata_wData_0; // @[package.scala:211:50]
assign _rdata_WIRE_56 = rdata_wData_0; // @[package.scala:211:50]
wire [7:0] rdata_wData_1 = wWords_0[15:8]; // @[package.scala:211:50]
assign _rdata_WIRE_1 = rdata_wData_1; // @[package.scala:211:50]
assign _rdata_WIRE_9 = rdata_wData_1; // @[package.scala:211:50]
assign _rdata_WIRE_17 = rdata_wData_1; // @[package.scala:211:50]
assign _rdata_WIRE_25 = rdata_wData_1; // @[package.scala:211:50]
assign _rdata_WIRE_33 = rdata_wData_1; // @[package.scala:211:50]
assign _rdata_WIRE_41 = rdata_wData_1; // @[package.scala:211:50]
assign _rdata_WIRE_49 = rdata_wData_1; // @[package.scala:211:50]
assign _rdata_WIRE_57 = rdata_wData_1; // @[package.scala:211:50]
wire [7:0] rdata_wData_2 = wWords_0[23:16]; // @[package.scala:211:50]
assign _rdata_WIRE_2 = rdata_wData_2; // @[package.scala:211:50]
assign _rdata_WIRE_10 = rdata_wData_2; // @[package.scala:211:50]
assign _rdata_WIRE_18 = rdata_wData_2; // @[package.scala:211:50]
assign _rdata_WIRE_26 = rdata_wData_2; // @[package.scala:211:50]
assign _rdata_WIRE_34 = rdata_wData_2; // @[package.scala:211:50]
assign _rdata_WIRE_42 = rdata_wData_2; // @[package.scala:211:50]
assign _rdata_WIRE_50 = rdata_wData_2; // @[package.scala:211:50]
assign _rdata_WIRE_58 = rdata_wData_2; // @[package.scala:211:50]
wire [7:0] rdata_wData_3 = wWords_0[31:24]; // @[package.scala:211:50]
assign _rdata_WIRE_3 = rdata_wData_3; // @[package.scala:211:50]
assign _rdata_WIRE_11 = rdata_wData_3; // @[package.scala:211:50]
assign _rdata_WIRE_19 = rdata_wData_3; // @[package.scala:211:50]
assign _rdata_WIRE_27 = rdata_wData_3; // @[package.scala:211:50]
assign _rdata_WIRE_35 = rdata_wData_3; // @[package.scala:211:50]
assign _rdata_WIRE_43 = rdata_wData_3; // @[package.scala:211:50]
assign _rdata_WIRE_51 = rdata_wData_3; // @[package.scala:211:50]
assign _rdata_WIRE_59 = rdata_wData_3; // @[package.scala:211:50]
wire [7:0] rdata_wData_4 = wWords_0[39:32]; // @[package.scala:211:50]
assign _rdata_WIRE_4 = rdata_wData_4; // @[package.scala:211:50]
assign _rdata_WIRE_12 = rdata_wData_4; // @[package.scala:211:50]
assign _rdata_WIRE_20 = rdata_wData_4; // @[package.scala:211:50]
assign _rdata_WIRE_28 = rdata_wData_4; // @[package.scala:211:50]
assign _rdata_WIRE_36 = rdata_wData_4; // @[package.scala:211:50]
assign _rdata_WIRE_44 = rdata_wData_4; // @[package.scala:211:50]
assign _rdata_WIRE_52 = rdata_wData_4; // @[package.scala:211:50]
assign _rdata_WIRE_60 = rdata_wData_4; // @[package.scala:211:50]
wire [7:0] rdata_wData_5 = wWords_0[47:40]; // @[package.scala:211:50]
assign _rdata_WIRE_5 = rdata_wData_5; // @[package.scala:211:50]
assign _rdata_WIRE_13 = rdata_wData_5; // @[package.scala:211:50]
assign _rdata_WIRE_21 = rdata_wData_5; // @[package.scala:211:50]
assign _rdata_WIRE_29 = rdata_wData_5; // @[package.scala:211:50]
assign _rdata_WIRE_37 = rdata_wData_5; // @[package.scala:211:50]
assign _rdata_WIRE_45 = rdata_wData_5; // @[package.scala:211:50]
assign _rdata_WIRE_53 = rdata_wData_5; // @[package.scala:211:50]
assign _rdata_WIRE_61 = rdata_wData_5; // @[package.scala:211:50]
wire [7:0] rdata_wData_6 = wWords_0[55:48]; // @[package.scala:211:50]
assign _rdata_WIRE_6 = rdata_wData_6; // @[package.scala:211:50]
assign _rdata_WIRE_14 = rdata_wData_6; // @[package.scala:211:50]
assign _rdata_WIRE_22 = rdata_wData_6; // @[package.scala:211:50]
assign _rdata_WIRE_30 = rdata_wData_6; // @[package.scala:211:50]
assign _rdata_WIRE_38 = rdata_wData_6; // @[package.scala:211:50]
assign _rdata_WIRE_46 = rdata_wData_6; // @[package.scala:211:50]
assign _rdata_WIRE_54 = rdata_wData_6; // @[package.scala:211:50]
assign _rdata_WIRE_62 = rdata_wData_6; // @[package.scala:211:50]
wire [7:0] rdata_wData_7 = wWords_0[63:56]; // @[package.scala:211:50]
assign _rdata_WIRE_7 = rdata_wData_7; // @[package.scala:211:50]
assign _rdata_WIRE_15 = rdata_wData_7; // @[package.scala:211:50]
assign _rdata_WIRE_23 = rdata_wData_7; // @[package.scala:211:50]
assign _rdata_WIRE_31 = rdata_wData_7; // @[package.scala:211:50]
assign _rdata_WIRE_39 = rdata_wData_7; // @[package.scala:211:50]
assign _rdata_WIRE_47 = rdata_wData_7; // @[package.scala:211:50]
assign _rdata_WIRE_55 = rdata_wData_7; // @[package.scala:211:50]
assign _rdata_WIRE_63 = rdata_wData_7; // @[package.scala:211:50]
wire _rdata_data_T = ~io_req_bits_write_0; // @[DCache.scala:49:7, :77:42]
assign _rdata_data_T_1 = rdata_valid & _rdata_data_T; // @[DCache.scala:71:30, :77:{39,42}]
wire [15:0] rdata_lo_lo = _rockettile_dcache_data_arrays_0_RW0_rdata[15:0]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi = _rockettile_dcache_data_arrays_0_RW0_rdata[31:16]; // @[package.scala:45:27]
wire [31:0] rdata_lo = {rdata_lo_hi, rdata_lo_lo}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo = _rockettile_dcache_data_arrays_0_RW0_rdata[47:32]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi = _rockettile_dcache_data_arrays_0_RW0_rdata[63:48]; // @[package.scala:45:27]
wire [31:0] rdata_hi = {rdata_hi_hi, rdata_hi_lo}; // @[package.scala:45:27]
wire [63:0] rdata_0_0 = {rdata_hi, rdata_lo}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_1 = _rockettile_dcache_data_arrays_0_RW0_rdata[79:64]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_1 = _rockettile_dcache_data_arrays_0_RW0_rdata[95:80]; // @[package.scala:45:27]
wire [31:0] rdata_lo_1 = {rdata_lo_hi_1, rdata_lo_lo_1}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_1 = _rockettile_dcache_data_arrays_0_RW0_rdata[111:96]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_1 = _rockettile_dcache_data_arrays_0_RW0_rdata[127:112]; // @[package.scala:45:27]
wire [31:0] rdata_hi_1 = {rdata_hi_hi_1, rdata_hi_lo_1}; // @[package.scala:45:27]
wire [63:0] rdata_0_1 = {rdata_hi_1, rdata_lo_1}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_2 = _rockettile_dcache_data_arrays_0_RW0_rdata[143:128]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_2 = _rockettile_dcache_data_arrays_0_RW0_rdata[159:144]; // @[package.scala:45:27]
wire [31:0] rdata_lo_2 = {rdata_lo_hi_2, rdata_lo_lo_2}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_2 = _rockettile_dcache_data_arrays_0_RW0_rdata[175:160]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_2 = _rockettile_dcache_data_arrays_0_RW0_rdata[191:176]; // @[package.scala:45:27]
wire [31:0] rdata_hi_2 = {rdata_hi_hi_2, rdata_hi_lo_2}; // @[package.scala:45:27]
wire [63:0] rdata_0_2 = {rdata_hi_2, rdata_lo_2}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_3 = _rockettile_dcache_data_arrays_0_RW0_rdata[207:192]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_3 = _rockettile_dcache_data_arrays_0_RW0_rdata[223:208]; // @[package.scala:45:27]
wire [31:0] rdata_lo_3 = {rdata_lo_hi_3, rdata_lo_lo_3}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_3 = _rockettile_dcache_data_arrays_0_RW0_rdata[239:224]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_3 = _rockettile_dcache_data_arrays_0_RW0_rdata[255:240]; // @[package.scala:45:27]
wire [31:0] rdata_hi_3 = {rdata_hi_hi_3, rdata_hi_lo_3}; // @[package.scala:45:27]
wire [63:0] rdata_0_3 = {rdata_hi_3, rdata_lo_3}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_4 = _rockettile_dcache_data_arrays_0_RW0_rdata[271:256]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_4 = _rockettile_dcache_data_arrays_0_RW0_rdata[287:272]; // @[package.scala:45:27]
wire [31:0] rdata_lo_4 = {rdata_lo_hi_4, rdata_lo_lo_4}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_4 = _rockettile_dcache_data_arrays_0_RW0_rdata[303:288]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_4 = _rockettile_dcache_data_arrays_0_RW0_rdata[319:304]; // @[package.scala:45:27]
wire [31:0] rdata_hi_4 = {rdata_hi_hi_4, rdata_hi_lo_4}; // @[package.scala:45:27]
wire [63:0] rdata_0_4 = {rdata_hi_4, rdata_lo_4}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_5 = _rockettile_dcache_data_arrays_0_RW0_rdata[335:320]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_5 = _rockettile_dcache_data_arrays_0_RW0_rdata[351:336]; // @[package.scala:45:27]
wire [31:0] rdata_lo_5 = {rdata_lo_hi_5, rdata_lo_lo_5}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_5 = _rockettile_dcache_data_arrays_0_RW0_rdata[367:352]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_5 = _rockettile_dcache_data_arrays_0_RW0_rdata[383:368]; // @[package.scala:45:27]
wire [31:0] rdata_hi_5 = {rdata_hi_hi_5, rdata_hi_lo_5}; // @[package.scala:45:27]
wire [63:0] rdata_0_5 = {rdata_hi_5, rdata_lo_5}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_6 = _rockettile_dcache_data_arrays_0_RW0_rdata[399:384]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_6 = _rockettile_dcache_data_arrays_0_RW0_rdata[415:400]; // @[package.scala:45:27]
wire [31:0] rdata_lo_6 = {rdata_lo_hi_6, rdata_lo_lo_6}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_6 = _rockettile_dcache_data_arrays_0_RW0_rdata[431:416]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_6 = _rockettile_dcache_data_arrays_0_RW0_rdata[447:432]; // @[package.scala:45:27]
wire [31:0] rdata_hi_6 = {rdata_hi_hi_6, rdata_hi_lo_6}; // @[package.scala:45:27]
wire [63:0] rdata_0_6 = {rdata_hi_6, rdata_lo_6}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_7 = _rockettile_dcache_data_arrays_0_RW0_rdata[463:448]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_7 = _rockettile_dcache_data_arrays_0_RW0_rdata[479:464]; // @[package.scala:45:27]
wire [31:0] rdata_lo_7 = {rdata_lo_hi_7, rdata_lo_lo_7}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_7 = _rockettile_dcache_data_arrays_0_RW0_rdata[495:480]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_7 = _rockettile_dcache_data_arrays_0_RW0_rdata[511:496]; // @[package.scala:45:27]
wire [31:0] rdata_hi_7 = {rdata_hi_hi_7, rdata_hi_lo_7}; // @[package.scala:45:27]
wire [63:0] rdata_0_7 = {rdata_hi_7, rdata_lo_7}; // @[package.scala:45:27]
wire _rdata_valid_T_2 = io_req_bits_wordMask_0[1]; // @[DCache.scala:49:7, :71:83]
wire _rdata_valid_T_3 = _rdata_valid_T_2; // @[DCache.scala:71:{60,83}]
wire rdata_valid_1 = io_req_valid_0 & _rdata_valid_T_3; // @[DCache.scala:49:7, :71:{30,60}]
assign _rdata_T_1 = rdata_valid_1 & io_req_bits_write_0; // @[DCache.scala:49:7, :71:30, :72:17]
wire [7:0] rdata_wData_0_1 = wWords_1[7:0]; // @[package.scala:211:50]
assign _rdata_WIRE_1_0 = rdata_wData_0_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_8 = rdata_wData_0_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_16 = rdata_wData_0_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_24 = rdata_wData_0_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_32 = rdata_wData_0_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_40 = rdata_wData_0_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_48 = rdata_wData_0_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_56 = rdata_wData_0_1; // @[package.scala:211:50]
wire [7:0] rdata_wData_1_1 = wWords_1[15:8]; // @[package.scala:211:50]
assign _rdata_WIRE_1_1 = rdata_wData_1_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_9 = rdata_wData_1_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_17 = rdata_wData_1_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_25 = rdata_wData_1_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_33 = rdata_wData_1_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_41 = rdata_wData_1_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_49 = rdata_wData_1_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_57 = rdata_wData_1_1; // @[package.scala:211:50]
wire [7:0] rdata_wData_2_1 = wWords_1[23:16]; // @[package.scala:211:50]
assign _rdata_WIRE_1_2 = rdata_wData_2_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_10 = rdata_wData_2_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_18 = rdata_wData_2_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_26 = rdata_wData_2_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_34 = rdata_wData_2_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_42 = rdata_wData_2_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_50 = rdata_wData_2_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_58 = rdata_wData_2_1; // @[package.scala:211:50]
wire [7:0] rdata_wData_3_1 = wWords_1[31:24]; // @[package.scala:211:50]
assign _rdata_WIRE_1_3 = rdata_wData_3_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_11 = rdata_wData_3_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_19 = rdata_wData_3_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_27 = rdata_wData_3_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_35 = rdata_wData_3_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_43 = rdata_wData_3_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_51 = rdata_wData_3_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_59 = rdata_wData_3_1; // @[package.scala:211:50]
wire [7:0] rdata_wData_4_1 = wWords_1[39:32]; // @[package.scala:211:50]
assign _rdata_WIRE_1_4 = rdata_wData_4_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_12 = rdata_wData_4_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_20 = rdata_wData_4_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_28 = rdata_wData_4_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_36 = rdata_wData_4_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_44 = rdata_wData_4_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_52 = rdata_wData_4_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_60 = rdata_wData_4_1; // @[package.scala:211:50]
wire [7:0] rdata_wData_5_1 = wWords_1[47:40]; // @[package.scala:211:50]
assign _rdata_WIRE_1_5 = rdata_wData_5_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_13 = rdata_wData_5_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_21 = rdata_wData_5_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_29 = rdata_wData_5_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_37 = rdata_wData_5_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_45 = rdata_wData_5_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_53 = rdata_wData_5_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_61 = rdata_wData_5_1; // @[package.scala:211:50]
wire [7:0] rdata_wData_6_1 = wWords_1[55:48]; // @[package.scala:211:50]
assign _rdata_WIRE_1_6 = rdata_wData_6_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_14 = rdata_wData_6_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_22 = rdata_wData_6_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_30 = rdata_wData_6_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_38 = rdata_wData_6_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_46 = rdata_wData_6_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_54 = rdata_wData_6_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_62 = rdata_wData_6_1; // @[package.scala:211:50]
wire [7:0] rdata_wData_7_1 = wWords_1[63:56]; // @[package.scala:211:50]
assign _rdata_WIRE_1_7 = rdata_wData_7_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_15 = rdata_wData_7_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_23 = rdata_wData_7_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_31 = rdata_wData_7_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_39 = rdata_wData_7_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_47 = rdata_wData_7_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_55 = rdata_wData_7_1; // @[package.scala:211:50]
assign _rdata_WIRE_1_63 = rdata_wData_7_1; // @[package.scala:211:50]
wire _rdata_data_T_2 = ~io_req_bits_write_0; // @[DCache.scala:49:7, :77:42]
assign _rdata_data_T_3 = rdata_valid_1 & _rdata_data_T_2; // @[DCache.scala:71:30, :77:{39,42}]
wire [15:0] rdata_lo_lo_8 = _rockettile_dcache_data_arrays_1_RW0_rdata[15:0]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_8 = _rockettile_dcache_data_arrays_1_RW0_rdata[31:16]; // @[package.scala:45:27]
wire [31:0] rdata_lo_8 = {rdata_lo_hi_8, rdata_lo_lo_8}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_8 = _rockettile_dcache_data_arrays_1_RW0_rdata[47:32]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_8 = _rockettile_dcache_data_arrays_1_RW0_rdata[63:48]; // @[package.scala:45:27]
wire [31:0] rdata_hi_8 = {rdata_hi_hi_8, rdata_hi_lo_8}; // @[package.scala:45:27]
wire [63:0] rdata_1_0 = {rdata_hi_8, rdata_lo_8}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_9 = _rockettile_dcache_data_arrays_1_RW0_rdata[79:64]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_9 = _rockettile_dcache_data_arrays_1_RW0_rdata[95:80]; // @[package.scala:45:27]
wire [31:0] rdata_lo_9 = {rdata_lo_hi_9, rdata_lo_lo_9}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_9 = _rockettile_dcache_data_arrays_1_RW0_rdata[111:96]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_9 = _rockettile_dcache_data_arrays_1_RW0_rdata[127:112]; // @[package.scala:45:27]
wire [31:0] rdata_hi_9 = {rdata_hi_hi_9, rdata_hi_lo_9}; // @[package.scala:45:27]
wire [63:0] rdata_1_1 = {rdata_hi_9, rdata_lo_9}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_10 = _rockettile_dcache_data_arrays_1_RW0_rdata[143:128]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_10 = _rockettile_dcache_data_arrays_1_RW0_rdata[159:144]; // @[package.scala:45:27]
wire [31:0] rdata_lo_10 = {rdata_lo_hi_10, rdata_lo_lo_10}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_10 = _rockettile_dcache_data_arrays_1_RW0_rdata[175:160]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_10 = _rockettile_dcache_data_arrays_1_RW0_rdata[191:176]; // @[package.scala:45:27]
wire [31:0] rdata_hi_10 = {rdata_hi_hi_10, rdata_hi_lo_10}; // @[package.scala:45:27]
wire [63:0] rdata_1_2 = {rdata_hi_10, rdata_lo_10}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_11 = _rockettile_dcache_data_arrays_1_RW0_rdata[207:192]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_11 = _rockettile_dcache_data_arrays_1_RW0_rdata[223:208]; // @[package.scala:45:27]
wire [31:0] rdata_lo_11 = {rdata_lo_hi_11, rdata_lo_lo_11}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_11 = _rockettile_dcache_data_arrays_1_RW0_rdata[239:224]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_11 = _rockettile_dcache_data_arrays_1_RW0_rdata[255:240]; // @[package.scala:45:27]
wire [31:0] rdata_hi_11 = {rdata_hi_hi_11, rdata_hi_lo_11}; // @[package.scala:45:27]
wire [63:0] rdata_1_3 = {rdata_hi_11, rdata_lo_11}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_12 = _rockettile_dcache_data_arrays_1_RW0_rdata[271:256]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_12 = _rockettile_dcache_data_arrays_1_RW0_rdata[287:272]; // @[package.scala:45:27]
wire [31:0] rdata_lo_12 = {rdata_lo_hi_12, rdata_lo_lo_12}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_12 = _rockettile_dcache_data_arrays_1_RW0_rdata[303:288]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_12 = _rockettile_dcache_data_arrays_1_RW0_rdata[319:304]; // @[package.scala:45:27]
wire [31:0] rdata_hi_12 = {rdata_hi_hi_12, rdata_hi_lo_12}; // @[package.scala:45:27]
wire [63:0] rdata_1_4 = {rdata_hi_12, rdata_lo_12}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_13 = _rockettile_dcache_data_arrays_1_RW0_rdata[335:320]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_13 = _rockettile_dcache_data_arrays_1_RW0_rdata[351:336]; // @[package.scala:45:27]
wire [31:0] rdata_lo_13 = {rdata_lo_hi_13, rdata_lo_lo_13}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_13 = _rockettile_dcache_data_arrays_1_RW0_rdata[367:352]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_13 = _rockettile_dcache_data_arrays_1_RW0_rdata[383:368]; // @[package.scala:45:27]
wire [31:0] rdata_hi_13 = {rdata_hi_hi_13, rdata_hi_lo_13}; // @[package.scala:45:27]
wire [63:0] rdata_1_5 = {rdata_hi_13, rdata_lo_13}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_14 = _rockettile_dcache_data_arrays_1_RW0_rdata[399:384]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_14 = _rockettile_dcache_data_arrays_1_RW0_rdata[415:400]; // @[package.scala:45:27]
wire [31:0] rdata_lo_14 = {rdata_lo_hi_14, rdata_lo_lo_14}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_14 = _rockettile_dcache_data_arrays_1_RW0_rdata[431:416]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_14 = _rockettile_dcache_data_arrays_1_RW0_rdata[447:432]; // @[package.scala:45:27]
wire [31:0] rdata_hi_14 = {rdata_hi_hi_14, rdata_hi_lo_14}; // @[package.scala:45:27]
wire [63:0] rdata_1_6 = {rdata_hi_14, rdata_lo_14}; // @[package.scala:45:27]
wire [15:0] rdata_lo_lo_15 = _rockettile_dcache_data_arrays_1_RW0_rdata[463:448]; // @[package.scala:45:27]
wire [15:0] rdata_lo_hi_15 = _rockettile_dcache_data_arrays_1_RW0_rdata[479:464]; // @[package.scala:45:27]
wire [31:0] rdata_lo_15 = {rdata_lo_hi_15, rdata_lo_lo_15}; // @[package.scala:45:27]
wire [15:0] rdata_hi_lo_15 = _rockettile_dcache_data_arrays_1_RW0_rdata[495:480]; // @[package.scala:45:27]
wire [15:0] rdata_hi_hi_15 = _rockettile_dcache_data_arrays_1_RW0_rdata[511:496]; // @[package.scala:45:27]
wire [31:0] rdata_hi_15 = {rdata_hi_hi_15, rdata_hi_lo_15}; // @[package.scala:45:27]
wire [63:0] rdata_1_7 = {rdata_hi_15, rdata_lo_15}; // @[package.scala:45:27]
assign _io_resp_0_T = {rdata_1_0, rdata_0_0}; // @[package.scala:45:27]
assign io_resp_0_0 = _io_resp_0_T; // @[package.scala:45:27]
assign _io_resp_1_T = {rdata_1_1, rdata_0_1}; // @[package.scala:45:27]
assign io_resp_1_0 = _io_resp_1_T; // @[package.scala:45:27]
assign _io_resp_2_T = {rdata_1_2, rdata_0_2}; // @[package.scala:45:27]
assign io_resp_2_0 = _io_resp_2_T; // @[package.scala:45:27]
assign _io_resp_3_T = {rdata_1_3, rdata_0_3}; // @[package.scala:45:27]
assign io_resp_3_0 = _io_resp_3_T; // @[package.scala:45:27]
assign _io_resp_4_T = {rdata_1_4, rdata_0_4}; // @[package.scala:45:27]
assign io_resp_4_0 = _io_resp_4_T; // @[package.scala:45:27]
assign _io_resp_5_T = {rdata_1_5, rdata_0_5}; // @[package.scala:45:27]
assign io_resp_5_0 = _io_resp_5_T; // @[package.scala:45:27]
assign _io_resp_6_T = {rdata_1_6, rdata_0_6}; // @[package.scala:45:27]
assign io_resp_6_0 = _io_resp_6_T; // @[package.scala:45:27]
assign _io_resp_7_T = {rdata_1_7, rdata_0_7}; // @[package.scala:45:27]
assign io_resp_7_0 = _io_resp_7_T; // @[package.scala:45:27]
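  // Behavioral single-port (RW0) SRAM macros emitted from DescribedSRAM; the read and write
  // paths share the address and enable signals derived above, with RW0_wmode selecting writes.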
rockettile_dcache_data_arrays_0 rockettile_dcache_data_arrays_0 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_rdata_T ? addr : _rdata_data_WIRE), // @[DescribedSRAM.scala:17:26]
.RW0_en (_rdata_data_T_1 | _rdata_T), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (io_req_bits_write_0), // @[DCache.scala:49:7]
.RW0_wdata ({_rdata_WIRE_63, _rdata_WIRE_62, _rdata_WIRE_61, _rdata_WIRE_60, _rdata_WIRE_59, _rdata_WIRE_58, _rdata_WIRE_57, _rdata_WIRE_56, _rdata_WIRE_55, _rdata_WIRE_54, _rdata_WIRE_53, _rdata_WIRE_52, _rdata_WIRE_51, _rdata_WIRE_50, _rdata_WIRE_49, _rdata_WIRE_48, _rdata_WIRE_47, _rdata_WIRE_46, _rdata_WIRE_45, _rdata_WIRE_44, _rdata_WIRE_43, _rdata_WIRE_42, _rdata_WIRE_41, _rdata_WIRE_40, _rdata_WIRE_39, _rdata_WIRE_38, _rdata_WIRE_37, _rdata_WIRE_36, _rdata_WIRE_35, _rdata_WIRE_34, _rdata_WIRE_33, _rdata_WIRE_32, _rdata_WIRE_31, _rdata_WIRE_30, _rdata_WIRE_29, _rdata_WIRE_28, _rdata_WIRE_27, _rdata_WIRE_26, _rdata_WIRE_25, _rdata_WIRE_24, _rdata_WIRE_23, _rdata_WIRE_22, _rdata_WIRE_21, _rdata_WIRE_20, _rdata_WIRE_19, _rdata_WIRE_18, _rdata_WIRE_17, _rdata_WIRE_16, _rdata_WIRE_15, _rdata_WIRE_14, _rdata_WIRE_13, _rdata_WIRE_12, _rdata_WIRE_11, _rdata_WIRE_10, _rdata_WIRE_9, _rdata_WIRE_8, _rdata_WIRE_7, _rdata_WIRE_6, _rdata_WIRE_5, _rdata_WIRE_4, _rdata_WIRE_3, _rdata_WIRE_2, _rdata_WIRE_1, _rdata_WIRE_0}), // @[DescribedSRAM.scala:17:26]
.RW0_rdata (_rockettile_dcache_data_arrays_0_RW0_rdata),
.RW0_wmask (_GEN) // @[DescribedSRAM.scala:17:26]
); // @[DescribedSRAM.scala:17:26]
rockettile_dcache_data_arrays_1 rockettile_dcache_data_arrays_1 ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (_rdata_T_1 ? addr : _rdata_data_WIRE_1), // @[DescribedSRAM.scala:17:26]
.RW0_en (_rdata_data_T_3 | _rdata_T_1), // @[DescribedSRAM.scala:17:26]
.RW0_clk (clock),
.RW0_wmode (io_req_bits_write_0), // @[DCache.scala:49:7]
.RW0_wdata ({_rdata_WIRE_1_63, _rdata_WIRE_1_62, _rdata_WIRE_1_61, _rdata_WIRE_1_60, _rdata_WIRE_1_59, _rdata_WIRE_1_58, _rdata_WIRE_1_57, _rdata_WIRE_1_56, _rdata_WIRE_1_55, _rdata_WIRE_1_54, _rdata_WIRE_1_53, _rdata_WIRE_1_52, _rdata_WIRE_1_51, _rdata_WIRE_1_50, _rdata_WIRE_1_49, _rdata_WIRE_1_48, _rdata_WIRE_1_47, _rdata_WIRE_1_46, _rdata_WIRE_1_45, _rdata_WIRE_1_44, _rdata_WIRE_1_43, _rdata_WIRE_1_42, _rdata_WIRE_1_41, _rdata_WIRE_1_40, _rdata_WIRE_1_39, _rdata_WIRE_1_38, _rdata_WIRE_1_37, _rdata_WIRE_1_36, _rdata_WIRE_1_35, _rdata_WIRE_1_34, _rdata_WIRE_1_33, _rdata_WIRE_1_32, _rdata_WIRE_1_31, _rdata_WIRE_1_30, _rdata_WIRE_1_29, _rdata_WIRE_1_28, _rdata_WIRE_1_27, _rdata_WIRE_1_26, _rdata_WIRE_1_25, _rdata_WIRE_1_24, _rdata_WIRE_1_23, _rdata_WIRE_1_22, _rdata_WIRE_1_21, _rdata_WIRE_1_20, _rdata_WIRE_1_19, _rdata_WIRE_1_18, _rdata_WIRE_1_17, _rdata_WIRE_1_16, _rdata_WIRE_1_15, _rdata_WIRE_1_14, _rdata_WIRE_1_13, _rdata_WIRE_1_12, _rdata_WIRE_1_11, _rdata_WIRE_1_10, _rdata_WIRE_1_9, _rdata_WIRE_1_8, _rdata_WIRE_1_7, _rdata_WIRE_1_6, _rdata_WIRE_1_5, _rdata_WIRE_1_4, _rdata_WIRE_1_3, _rdata_WIRE_1_2, _rdata_WIRE_1_1, _rdata_WIRE_1_0}), // @[DescribedSRAM.scala:17:26]
.RW0_rdata (_rockettile_dcache_data_arrays_1_RW0_rdata),
.RW0_wmask (_GEN) // @[DescribedSRAM.scala:17:26]
); // @[DescribedSRAM.scala:17:26]
assign io_resp_0 = io_resp_0_0; // @[DCache.scala:49:7]
assign io_resp_1 = io_resp_1_0; // @[DCache.scala:49:7]
assign io_resp_2 = io_resp_2_0; // @[DCache.scala:49:7]
assign io_resp_3 = io_resp_3_0; // @[DCache.scala:49:7]
assign io_resp_4 = io_resp_4_0; // @[DCache.scala:49:7]
assign io_resp_5 = io_resp_5_0; // @[DCache.scala:49:7]
assign io_resp_6 = io_resp_6_0; // @[DCache.scala:49:7]
assign io_resp_7 = io_resp_7_0; // @[DCache.scala:49:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister, but allows the user to suggest a name
// for the registers that get instantiated and to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
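// Illustrative use (hypothetical signal names): delay a Bool by three reset-initialized stages:
//   val synced = ShiftRegInit(in = raw, n = 3, init = false.B, name = Some("sync"))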
/** These wrap behavioral shift registers into specific modules so that
 * backend flows can replace or constrain them properly when they are used
 * for CDC synchronization rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
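    // Reinterpret the chain's flat UInt output back as the caller's original Data type.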
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
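  // io.d enters at chain.last and shifts toward chain.head, so io.q reflects io.d after `depth` cycles.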
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
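// Illustrative usage sketch (not part of the upstream file): a 2-deep,
// 4-bit-wide asynchronously reset pipeline whose instance is suggested the
// name "pipe".
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val q = Output(UInt(4.W))
  })
  io.q := AsyncResetShiftReg(io.d, 2, 0, Some("pipe"))
}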
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
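// Illustrative usage sketch (not part of the upstream file): bring a single
// asynchronous bit into the local clock domain through a 3-deep,
// asynchronously reset synchronizer chain.
class AsyncResetSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetSynchronizerShiftReg(io.d, 3, 0, Some("cdc_sync"))
}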
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
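// Illustrative usage sketch (not part of the upstream file): capture a
// quasi-static byte with an enable as it crosses into this clock domain.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(8.W))
    val en = Input(Bool())
    val q  = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, io.en, doInit = true, name = Some("cdc_reg"))
}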
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_68( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[ShiftReg.scala:36:14]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR-fold an input register of fullLength bits into compressedLength bits.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
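/**
 * Illustrative usage sketch (not part of the upstream file): XOR-fold a
 * 16-bit history register down to a 4-bit index.
 */
class FoldExample extends Module {
  val io = IO(new Bundle {
    val hist = Input(UInt(16.W))
    val idx  = Output(UInt(4.W))
  })
  io.idx := Fold(io.hist, compressedLength = 4, fullLength = 16)
}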
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
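/**
 * Illustrative usage sketch (not part of the upstream file): a micro-op whose
 * branch mask overlaps the mispredict mask is on a squashed path.
 */
class MaskMatchExample extends Module {
  val io = IO(new Bundle {
    val mispredict_mask = Input(UInt(4.W))
    val uop_br_mask     = Input(UInt(4.W))
    val killed          = Output(Bool())
  })
  io.killed := maskMatch(io.mispredict_mask, io.uop_br_mask)
}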
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
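/**
 * Illustrative usage sketch (not part of the upstream file): advance an
 * enqueue pointer by a variable amount over a 12-entry (non power-of-2)
 * queue. The wrapped sum is always below 12, so the low 4 bits hold it.
 */
class WrapAddExample extends Module {
  val io = IO(new Bundle {
    val ptr     = Input(UInt(4.W))
    val amt     = Input(UInt(4.W))
    val new_ptr = Output(UInt(4.W))
  })
  io.new_ptr := WrapAdd(io.ptr, io.amt, 12)(3, 0)
}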
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert for the scenario where pc is wider than b
    //   (which would otherwise clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
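/**
 * Illustrative usage sketch (not part of the upstream file): align a fetch PC
 * down to an 8-byte fetch boundary.
 */
class AlignPCExample extends Module {
  val io = IO(new Bundle {
    val pc         = Input(UInt(40.W))
    val aligned_pc = Output(UInt(40.W))
  })
  io.aligned_pc := AlignPCToBoundary(io.pc, 8)
}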
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
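/**
 * Illustrative usage sketch (not part of the upstream file): with head = 5 in
 * an 8-entry queue, index 6 is older than index 2, since 2 sits past the wrap.
 */
class IsOlderExample extends Module {
  val io = IO(new Bundle {
    val i0    = Input(UInt(3.W))
    val i1    = Input(UInt(3.W))
    val head  = Input(UInt(3.W))
    val older = Output(Bool())
  })
  io.older := IsOlder(io.i0, io.i1, io.head)
}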
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
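/**
 * Illustrative usage sketch (not part of the upstream file): for the 8-bit
 * one-hot input 0b00010000, MaskLower yields 0b00011111 and MaskUpper yields
 * 0b11110000.
 */
class MaskExample extends Module {
  val io = IO(new Bundle {
    val in    = Input(UInt(8.W))
    val lower = Output(UInt(8.W))
    val upper = Output(UInt(8.W))
  })
  io.lower := MaskLower(io.in)
  io.upper := MaskUpper(io.in)
}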
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
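/**
 * Illustrative usage sketch (not part of the upstream file): grant the two
 * lowest-numbered requesters of an 8-bit request vector as one-hot selects.
 */
class SelectFirstNExample extends Module {
  val io = IO(new Bundle {
    val reqs   = Input(UInt(8.W))
    val grants = Output(Vec(2, UInt(8.W)))
  })
  io.grants := SelectFirstN(io.reqs, 2)
}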
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
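/**
 * Illustrative usage sketch (not part of the upstream file): funnel up to four
 * valid byte streams down onto the first two ready outputs.
 */
class CompactorExample extends Module {
  val io = IO(new Bundle {
    val in  = Vec(4, Flipped(DecoupledIO(UInt(8.W))))
    val out = Vec(2, DecoupledIO(UInt(8.W)))
  })
  val compactor = Module(new Compactor(n = 4, k = 2, gen = UInt(8.W)))
  compactor.io.in <> io.in
  io.out <> compactor.io.out
}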
/**
* Create a queue that can be killed with a branch kill signal.
 * Assumption: enq.valid is only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
| module BranchKillableQueue_22( // @[util.scala:458:7]
input clock, // @[util.scala:458:7]
input reset, // @[util.scala:458:7]
output io_enq_ready, // @[util.scala:463:14]
input io_enq_valid, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_inst, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:463:14]
input io_enq_bits_uop_is_rvc, // @[util.scala:463:14]
input [33:0] io_enq_bits_uop_debug_pc, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_0, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_1, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_2, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_0, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_1, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_2, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_4, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_5, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_6, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_7, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_8, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_9, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_dis_col_sel, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_mask, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_br_tag, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_type, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfb, // @[util.scala:463:14]
input io_enq_bits_uop_is_fence, // @[util.scala:463:14]
input io_enq_bits_uop_is_fencei, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfence, // @[util.scala:463:14]
input io_enq_bits_uop_is_amo, // @[util.scala:463:14]
input io_enq_bits_uop_is_eret, // @[util.scala:463:14]
input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
input io_enq_bits_uop_is_rocc, // @[util.scala:463:14]
input io_enq_bits_uop_is_mov, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ftq_idx, // @[util.scala:463:14]
input io_enq_bits_uop_edge_inst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:463:14]
input io_enq_bits_uop_taken, // @[util.scala:463:14]
input io_enq_bits_uop_imm_rename, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_imm_sel, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_pimm, // @[util.scala:463:14]
input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_op1_sel, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_op2_sel, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_rob_idx, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ldq_idx, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_stq_idx, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_pdst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs2, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_prs3, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_ppred, // @[util.scala:463:14]
input io_enq_bits_uop_prs1_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs2_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs3_busy, // @[util.scala:463:14]
input io_enq_bits_uop_ppred_busy, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_stale_pdst, // @[util.scala:463:14]
input io_enq_bits_uop_exception, // @[util.scala:463:14]
input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:463:14]
input io_enq_bits_uop_mem_signed, // @[util.scala:463:14]
input io_enq_bits_uop_uses_ldq, // @[util.scala:463:14]
input io_enq_bits_uop_uses_stq, // @[util.scala:463:14]
input io_enq_bits_uop_is_unique, // @[util.scala:463:14]
input io_enq_bits_uop_flush_on_commit, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_csr_cmd, // @[util.scala:463:14]
input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_ldst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
input io_enq_bits_uop_frs3_en, // @[util.scala:463:14]
input io_enq_bits_uop_fcn_dw, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_fcn_op, // @[util.scala:463:14]
input io_enq_bits_uop_fp_val, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_fp_rm, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_typ, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_debug_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:463:14]
input [33:0] io_enq_bits_addr, // @[util.scala:463:14]
input [63:0] io_enq_bits_data, // @[util.scala:463:14]
input io_enq_bits_is_hella, // @[util.scala:463:14]
input io_enq_bits_tag_match, // @[util.scala:463:14]
input [1:0] io_enq_bits_old_meta_coh_state, // @[util.scala:463:14]
input [21:0] io_enq_bits_old_meta_tag, // @[util.scala:463:14]
input [1:0] io_enq_bits_way_en, // @[util.scala:463:14]
input [4:0] io_enq_bits_sdq_id, // @[util.scala:463:14]
input io_deq_ready, // @[util.scala:463:14]
output io_deq_valid, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_inst, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:463:14]
output io_deq_bits_uop_is_rvc, // @[util.scala:463:14]
output [33:0] io_deq_bits_uop_debug_pc, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_0, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_1, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_2, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_0, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_1, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_2, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_4, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_5, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_6, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_7, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_8, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_9, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_dis_col_sel, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_mask, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_br_tag, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_type, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfb, // @[util.scala:463:14]
output io_deq_bits_uop_is_fence, // @[util.scala:463:14]
output io_deq_bits_uop_is_fencei, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfence, // @[util.scala:463:14]
output io_deq_bits_uop_is_amo, // @[util.scala:463:14]
output io_deq_bits_uop_is_eret, // @[util.scala:463:14]
output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
output io_deq_bits_uop_is_rocc, // @[util.scala:463:14]
output io_deq_bits_uop_is_mov, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ftq_idx, // @[util.scala:463:14]
output io_deq_bits_uop_edge_inst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:463:14]
output io_deq_bits_uop_taken, // @[util.scala:463:14]
output io_deq_bits_uop_imm_rename, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_imm_sel, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_pimm, // @[util.scala:463:14]
output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_op1_sel, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_op2_sel, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_rob_idx, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ldq_idx, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_stq_idx, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_pdst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs2, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_prs3, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_ppred, // @[util.scala:463:14]
output io_deq_bits_uop_prs1_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs2_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs3_busy, // @[util.scala:463:14]
output io_deq_bits_uop_ppred_busy, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_stale_pdst, // @[util.scala:463:14]
output io_deq_bits_uop_exception, // @[util.scala:463:14]
output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:463:14]
output io_deq_bits_uop_mem_signed, // @[util.scala:463:14]
output io_deq_bits_uop_uses_ldq, // @[util.scala:463:14]
output io_deq_bits_uop_uses_stq, // @[util.scala:463:14]
output io_deq_bits_uop_is_unique, // @[util.scala:463:14]
output io_deq_bits_uop_flush_on_commit, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_csr_cmd, // @[util.scala:463:14]
output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_ldst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
output io_deq_bits_uop_frs3_en, // @[util.scala:463:14]
output io_deq_bits_uop_fcn_dw, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_fcn_op, // @[util.scala:463:14]
output io_deq_bits_uop_fp_val, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_fp_rm, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_typ, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_debug_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:463:14]
output [33:0] io_deq_bits_addr, // @[util.scala:463:14]
output [63:0] io_deq_bits_data, // @[util.scala:463:14]
output io_deq_bits_is_hella, // @[util.scala:463:14]
output io_deq_bits_tag_match, // @[util.scala:463:14]
output [1:0] io_deq_bits_old_meta_coh_state, // @[util.scala:463:14]
output [21:0] io_deq_bits_old_meta_tag, // @[util.scala:463:14]
output [1:0] io_deq_bits_way_en, // @[util.scala:463:14]
output [4:0] io_deq_bits_sdq_id, // @[util.scala:463:14]
output io_empty, // @[util.scala:463:14]
output [3:0] io_count // @[util.scala:463:14]
);
wire [130:0] _ram_ext_R0_data; // @[util.scala:503:22]
wire io_enq_valid_0 = io_enq_valid; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:458:7]
wire [33:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_0_0 = io_enq_bits_uop_iq_type_0; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_1_0 = io_enq_bits_uop_iq_type_1; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_2_0 = io_enq_bits_uop_iq_type_2; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_3_0 = io_enq_bits_uop_iq_type_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_0_0 = io_enq_bits_uop_fu_code_0; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_1_0 = io_enq_bits_uop_fu_code_1; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_2_0 = io_enq_bits_uop_fu_code_2; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_3_0 = io_enq_bits_uop_fu_code_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_4_0 = io_enq_bits_uop_fu_code_4; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_5_0 = io_enq_bits_uop_fu_code_5; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_6_0 = io_enq_bits_uop_fu_code_6; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_7_0 = io_enq_bits_uop_fu_code_7; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_8_0 = io_enq_bits_uop_fu_code_8; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_9_0 = io_enq_bits_uop_fu_code_9; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_0 = io_enq_bits_uop_iw_issued; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_agen_0 = io_enq_bits_uop_iw_issued_partial_agen; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_dgen_0 = io_enq_bits_uop_iw_issued_partial_dgen; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p1_speculative_child_0 = io_enq_bits_uop_iw_p1_speculative_child; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p2_speculative_child_0 = io_enq_bits_uop_iw_p2_speculative_child; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p1_bypass_hint_0 = io_enq_bits_uop_iw_p1_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p2_bypass_hint_0 = io_enq_bits_uop_iw_p2_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p3_bypass_hint_0 = io_enq_bits_uop_iw_p3_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_dis_col_sel_0 = io_enq_bits_uop_dis_col_sel; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_type_0 = io_enq_bits_uop_br_type; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfb_0 = io_enq_bits_uop_is_sfb; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fence_0 = io_enq_bits_uop_is_fence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfence_0 = io_enq_bits_uop_is_sfence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:458:7]
wire io_enq_bits_uop_is_eret_0 = io_enq_bits_uop_is_eret; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rocc_0 = io_enq_bits_uop_is_rocc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_mov_0 = io_enq_bits_uop_is_mov; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:458:7]
wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:458:7]
wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:458:7]
wire io_enq_bits_uop_imm_rename_0 = io_enq_bits_uop_imm_rename; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_imm_sel_0 = io_enq_bits_uop_imm_sel; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_pimm_0 = io_enq_bits_uop_pimm; // @[util.scala:458:7]
wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_op1_sel_0 = io_enq_bits_uop_op1_sel; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_op2_sel_0 = io_enq_bits_uop_op2_sel; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ldst_0 = io_enq_bits_uop_fp_ctrl_ldst; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wen_0 = io_enq_bits_uop_fp_ctrl_wen; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren1_0 = io_enq_bits_uop_fp_ctrl_ren1; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren2_0 = io_enq_bits_uop_fp_ctrl_ren2; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren3_0 = io_enq_bits_uop_fp_ctrl_ren3; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap12_0 = io_enq_bits_uop_fp_ctrl_swap12; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap23_0 = io_enq_bits_uop_fp_ctrl_swap23; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn_0 = io_enq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut_0 = io_enq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fromint_0 = io_enq_bits_uop_fp_ctrl_fromint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_toint_0 = io_enq_bits_uop_fp_ctrl_toint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fastpipe_0 = io_enq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fma_0 = io_enq_bits_uop_fp_ctrl_fma; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_div_0 = io_enq_bits_uop_fp_ctrl_div; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_sqrt_0 = io_enq_bits_uop_fp_ctrl_sqrt; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wflags_0 = io_enq_bits_uop_fp_ctrl_wflags; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_vec_0 = io_enq_bits_uop_fp_ctrl_vec; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:458:7]
wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:458:7]
wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:458:7]
wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:458:7]
wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:458:7]
wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:458:7]
wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_csr_cmd_0 = io_enq_bits_uop_csr_cmd; // @[util.scala:458:7]
wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:458:7]
wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:458:7]
wire io_enq_bits_uop_fcn_dw_0 = io_enq_bits_uop_fcn_dw; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_fcn_op_0 = io_enq_bits_uop_fcn_op; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_fp_rm_0 = io_enq_bits_uop_fp_rm; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_typ_0 = io_enq_bits_uop_fp_typ; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:458:7]
wire [33:0] io_enq_bits_addr_0 = io_enq_bits_addr; // @[util.scala:458:7]
wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:458:7]
wire io_enq_bits_is_hella_0 = io_enq_bits_is_hella; // @[util.scala:458:7]
wire io_enq_bits_tag_match_0 = io_enq_bits_tag_match; // @[util.scala:458:7]
wire [1:0] io_enq_bits_old_meta_coh_state_0 = io_enq_bits_old_meta_coh_state; // @[util.scala:458:7]
wire [21:0] io_enq_bits_old_meta_tag_0 = io_enq_bits_old_meta_tag; // @[util.scala:458:7]
wire [1:0] io_enq_bits_way_en_0 = io_enq_bits_way_en; // @[util.scala:458:7]
wire [4:0] io_enq_bits_sdq_id_0 = io_enq_bits_sdq_id; // @[util.scala:458:7]
wire io_deq_ready_0 = io_deq_ready; // @[util.scala:458:7]
wire _do_enq_T_4 = 1'h1; // @[util.scala:514:42]
wire _do_enq_T_7 = 1'h1; // @[util.scala:514:102]
wire _valids_0_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_0_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_1_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_1_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_2_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_2_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_3_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_3_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_4_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_4_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_5_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_5_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_6_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_6_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_7_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_7_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_8_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_8_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_9_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_9_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_10_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_10_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_11_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_11_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_12_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_12_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_13_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_13_T_6 = 1'h1; // @[util.scala:520:83]
wire _valids_14_T_3 = 1'h1; // @[util.scala:520:34]
wire _valids_14_T_6 = 1'h1; // @[util.scala:520:83]
wire [3:0] _uops_0_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_1_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_2_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_3_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_4_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_5_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_6_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_7_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_8_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_9_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_10_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_11_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_12_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_13_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_14_br_mask_T = 4'hF; // @[util.scala:97:23]
wire [3:0] _uops_br_mask_T = 4'hF; // @[util.scala:93:27]
wire [20:0] io_brupdate_b2_target_offset = 21'h0; // @[util.scala:458:7, :463:14]
wire [63:0] io_brupdate_b2_uop_exc_cause = 64'h0; // @[util.scala:458:7, :463:14]
wire [19:0] io_brupdate_b2_uop_imm_packed = 20'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_pimm = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_rob_idx = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_mem_cmd = 5'h0; // @[util.scala:458:7, :463:14]
wire [4:0] io_brupdate_b2_uop_fcn_op = 5'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_imm_sel = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_op2_sel = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_csr_cmd = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_fp_rm = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_debug_fsrc = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_uop_debug_tsrc = 3'h0; // @[util.scala:458:7, :463:14]
wire [2:0] io_brupdate_b2_cfi_type = 3'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_pc_lob = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_pdst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs1 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs2 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_prs3 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_stale_pdst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_ldst = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs1 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs2 = 6'h0; // @[util.scala:458:7, :463:14]
wire [5:0] io_brupdate_b2_uop_lrs3 = 6'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_br_tag = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_op1_sel = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_rxq_idx = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_mem_size = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_dst_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_uop_fp_typ = 2'h0; // @[util.scala:458:7, :463:14]
wire [1:0] io_brupdate_b2_pc_sel = 2'h0; // @[util.scala:458:7, :463:14]
wire [33:0] io_brupdate_b2_uop_debug_pc = 34'h0; // @[util.scala:458:7, :463:14]
wire [33:0] io_brupdate_b2_jalr_target = 34'h0; // @[util.scala:458:7, :463:14]
wire io_brupdate_b2_uop_is_rvc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_0 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_0 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_4 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_5 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_6 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_7 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_8 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_9 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p1_speculative_child = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p2_speculative_child = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_dis_col_sel = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfb = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fence = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fencei = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfence = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_amo = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_eret = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sys_pc2epc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_rocc = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_mov = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_edge_inst = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_taken = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_imm_rename = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wen = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_toint = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fma = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_div = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_vec = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs1_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs2_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs3_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ppred_busy = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_exception = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_mem_signed = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_ldq = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_stq = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_unique = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_flush_on_commit = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ldst_is_rs1 = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_frs3_en = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fcn_dw = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_val = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_pf_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ae_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ma_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_debug_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_xcpt_if = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_mispredict = 1'h0; // @[util.scala:458:7]
wire io_brupdate_b2_taken = 1'h0; // @[util.scala:458:7]
wire io_flush = 1'h0; // @[util.scala:458:7]
wire _valids_WIRE_0 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_1 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_2 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_3 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_4 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_5 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_6 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_7 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_8 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_9 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_10 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_11 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_12 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_13 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_14 = 1'h0; // @[util.scala:504:34]
wire _do_enq_T_2 = 1'h0; // @[util.scala:126:59]
wire _do_enq_T_3 = 1'h0; // @[util.scala:61:61]
wire _do_enq_T_6 = 1'h0; // @[util.scala:514:113]
wire _valids_0_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_0_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_0_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_1_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_1_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_1_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_2_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_2_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_2_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_3_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_3_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_3_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_4_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_4_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_4_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_5_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_5_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_5_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_6_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_6_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_6_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_7_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_7_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_7_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_8_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_8_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_8_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_9_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_9_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_9_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_10_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_10_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_10_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_11_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_11_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_11_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_12_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_12_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_12_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_13_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_13_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_13_T_5 = 1'h0; // @[util.scala:520:94]
wire _valids_14_T_1 = 1'h0; // @[util.scala:126:59]
wire _valids_14_T_2 = 1'h0; // @[util.scala:61:61]
wire _valids_14_T_5 = 1'h0; // @[util.scala:520:94]
wire [31:0] io_brupdate_b2_uop_inst = 32'h0; // @[util.scala:458:7, :463:14]
wire [31:0] io_brupdate_b2_uop_debug_inst = 32'h0; // @[util.scala:458:7, :463:14]
wire [3:0] io_brupdate_b1_resolve_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b1_mispredict_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_mask = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_type = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ldq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_stq_idx = 4'h0; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_ppred = 4'h0; // @[util.scala:458:7]
wire [3:0] _do_enq_T_1 = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_0_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_1_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_2_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_3_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_4_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_5_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_6_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_7_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_8_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_9_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_10_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_11_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_12_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_13_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _valids_14_T = 4'h0; // @[util.scala:126:51]
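  // Handshake intermediates and the dequeue-side view of the selected entry:
  // the out_* wires (util.scala:545) hold the entry currently presented on
  // io_deq, reassembled from the RAM read data (non-uop payload) and, further
  // below, the per-entry uop registers.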
wire _io_enq_ready_T; // @[util.scala:543:21]
wire [3:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask_0; // @[util.scala:93:25, :458:7]
wire _io_deq_valid_T_1; // @[util.scala:548:42]
wire [31:0] out_uop_inst; // @[util.scala:545:19]
wire [31:0] out_uop_debug_inst; // @[util.scala:545:19]
wire out_uop_is_rvc; // @[util.scala:545:19]
wire [33:0] out_uop_debug_pc; // @[util.scala:545:19]
wire out_uop_iq_type_0; // @[util.scala:545:19]
wire out_uop_iq_type_1; // @[util.scala:545:19]
wire out_uop_iq_type_2; // @[util.scala:545:19]
wire out_uop_iq_type_3; // @[util.scala:545:19]
wire out_uop_fu_code_0; // @[util.scala:545:19]
wire out_uop_fu_code_1; // @[util.scala:545:19]
wire out_uop_fu_code_2; // @[util.scala:545:19]
wire out_uop_fu_code_3; // @[util.scala:545:19]
wire out_uop_fu_code_4; // @[util.scala:545:19]
wire out_uop_fu_code_5; // @[util.scala:545:19]
wire out_uop_fu_code_6; // @[util.scala:545:19]
wire out_uop_fu_code_7; // @[util.scala:545:19]
wire out_uop_fu_code_8; // @[util.scala:545:19]
wire out_uop_fu_code_9; // @[util.scala:545:19]
wire out_uop_iw_issued; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_agen; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_dgen; // @[util.scala:545:19]
wire out_uop_iw_p1_speculative_child; // @[util.scala:545:19]
wire out_uop_iw_p2_speculative_child; // @[util.scala:545:19]
wire out_uop_iw_p1_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p2_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p3_bypass_hint; // @[util.scala:545:19]
wire out_uop_dis_col_sel; // @[util.scala:545:19]
wire [3:0] out_uop_br_mask; // @[util.scala:545:19]
wire [1:0] out_uop_br_tag; // @[util.scala:545:19]
wire [3:0] out_uop_br_type; // @[util.scala:545:19]
wire out_uop_is_sfb; // @[util.scala:545:19]
wire out_uop_is_fence; // @[util.scala:545:19]
wire out_uop_is_fencei; // @[util.scala:545:19]
wire out_uop_is_sfence; // @[util.scala:545:19]
wire out_uop_is_amo; // @[util.scala:545:19]
wire out_uop_is_eret; // @[util.scala:545:19]
wire out_uop_is_sys_pc2epc; // @[util.scala:545:19]
wire out_uop_is_rocc; // @[util.scala:545:19]
wire out_uop_is_mov; // @[util.scala:545:19]
wire [3:0] out_uop_ftq_idx; // @[util.scala:545:19]
wire out_uop_edge_inst; // @[util.scala:545:19]
wire [5:0] out_uop_pc_lob; // @[util.scala:545:19]
wire out_uop_taken; // @[util.scala:545:19]
wire out_uop_imm_rename; // @[util.scala:545:19]
wire [2:0] out_uop_imm_sel; // @[util.scala:545:19]
wire [4:0] out_uop_pimm; // @[util.scala:545:19]
wire [19:0] out_uop_imm_packed; // @[util.scala:545:19]
wire [1:0] out_uop_op1_sel; // @[util.scala:545:19]
wire [2:0] out_uop_op2_sel; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ldst; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wen; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren1; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren2; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren3; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap12; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap23; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagIn; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagOut; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fromint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_toint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fastpipe; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fma; // @[util.scala:545:19]
wire out_uop_fp_ctrl_div; // @[util.scala:545:19]
wire out_uop_fp_ctrl_sqrt; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wflags; // @[util.scala:545:19]
wire out_uop_fp_ctrl_vec; // @[util.scala:545:19]
wire [4:0] out_uop_rob_idx; // @[util.scala:545:19]
wire [3:0] out_uop_ldq_idx; // @[util.scala:545:19]
wire [3:0] out_uop_stq_idx; // @[util.scala:545:19]
wire [1:0] out_uop_rxq_idx; // @[util.scala:545:19]
wire [5:0] out_uop_pdst; // @[util.scala:545:19]
wire [5:0] out_uop_prs1; // @[util.scala:545:19]
wire [5:0] out_uop_prs2; // @[util.scala:545:19]
wire [5:0] out_uop_prs3; // @[util.scala:545:19]
wire [3:0] out_uop_ppred; // @[util.scala:545:19]
wire out_uop_prs1_busy; // @[util.scala:545:19]
wire out_uop_prs2_busy; // @[util.scala:545:19]
wire out_uop_prs3_busy; // @[util.scala:545:19]
wire out_uop_ppred_busy; // @[util.scala:545:19]
wire [5:0] out_uop_stale_pdst; // @[util.scala:545:19]
wire out_uop_exception; // @[util.scala:545:19]
wire [63:0] out_uop_exc_cause; // @[util.scala:545:19]
wire [4:0] out_uop_mem_cmd; // @[util.scala:545:19]
wire [1:0] out_uop_mem_size; // @[util.scala:545:19]
wire out_uop_mem_signed; // @[util.scala:545:19]
wire out_uop_uses_ldq; // @[util.scala:545:19]
wire out_uop_uses_stq; // @[util.scala:545:19]
wire out_uop_is_unique; // @[util.scala:545:19]
wire out_uop_flush_on_commit; // @[util.scala:545:19]
wire [2:0] out_uop_csr_cmd; // @[util.scala:545:19]
wire out_uop_ldst_is_rs1; // @[util.scala:545:19]
wire [5:0] out_uop_ldst; // @[util.scala:545:19]
wire [5:0] out_uop_lrs1; // @[util.scala:545:19]
wire [5:0] out_uop_lrs2; // @[util.scala:545:19]
wire [5:0] out_uop_lrs3; // @[util.scala:545:19]
wire [1:0] out_uop_dst_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs1_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs2_rtype; // @[util.scala:545:19]
wire out_uop_frs3_en; // @[util.scala:545:19]
wire out_uop_fcn_dw; // @[util.scala:545:19]
wire [4:0] out_uop_fcn_op; // @[util.scala:545:19]
wire out_uop_fp_val; // @[util.scala:545:19]
wire [2:0] out_uop_fp_rm; // @[util.scala:545:19]
wire [1:0] out_uop_fp_typ; // @[util.scala:545:19]
wire out_uop_xcpt_pf_if; // @[util.scala:545:19]
wire out_uop_xcpt_ae_if; // @[util.scala:545:19]
wire out_uop_xcpt_ma_if; // @[util.scala:545:19]
wire out_uop_bp_debug_if; // @[util.scala:545:19]
wire out_uop_bp_xcpt_if; // @[util.scala:545:19]
wire [2:0] out_uop_debug_fsrc; // @[util.scala:545:19]
wire [2:0] out_uop_debug_tsrc; // @[util.scala:545:19]
wire [33:0] out_addr; // @[util.scala:545:19]
wire [63:0] out_data; // @[util.scala:545:19]
wire out_is_hella; // @[util.scala:545:19]
wire out_tag_match; // @[util.scala:545:19]
wire [1:0] out_old_meta_coh_state; // @[util.scala:545:19]
wire [21:0] out_old_meta_tag; // @[util.scala:545:19]
wire [1:0] out_way_en; // @[util.scala:545:19]
wire [4:0] out_sdq_id; // @[util.scala:545:19]
wire _io_empty_T_1; // @[util.scala:512:27]
wire [3:0] _io_count_T_5; // @[util.scala:556:22]
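  // Shadow wires (suffix _0) for the module's output ports; they are driven
  // later in the module and forwarded to the corresponding io_* outputs.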
wire io_enq_ready_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
wire [33:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
wire io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
wire io_deq_bits_uop_taken_0; // @[util.scala:458:7]
wire io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_exception_0; // @[util.scala:458:7]
wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
wire io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
wire io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_old_meta_coh_state_0; // @[util.scala:458:7]
wire [21:0] io_deq_bits_old_meta_tag_0; // @[util.scala:458:7]
wire [33:0] io_deq_bits_addr_0; // @[util.scala:458:7]
wire [63:0] io_deq_bits_data_0; // @[util.scala:458:7]
wire io_deq_bits_is_hella_0; // @[util.scala:458:7]
wire io_deq_bits_tag_match_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_way_en_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_sdq_id_0; // @[util.scala:458:7]
wire io_deq_valid_0; // @[util.scala:458:7]
wire io_empty_0; // @[util.scala:458:7]
wire [3:0] io_count_0; // @[util.scala:458:7]
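  // Unpack the packed RAM read word into the non-uop payload fields of the
  // dequeuing entry (addr, data, is_hella, tag_match, old_meta, way_en, sdq_id).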
assign out_addr = _ram_ext_R0_data[33:0]; // @[util.scala:503:22, :545:19]
assign out_data = _ram_ext_R0_data[97:34]; // @[util.scala:503:22, :545:19]
assign out_is_hella = _ram_ext_R0_data[98]; // @[util.scala:503:22, :545:19]
assign out_tag_match = _ram_ext_R0_data[99]; // @[util.scala:503:22, :545:19]
assign out_old_meta_coh_state = _ram_ext_R0_data[101:100]; // @[util.scala:503:22, :545:19]
assign out_old_meta_tag = _ram_ext_R0_data[123:102]; // @[util.scala:503:22, :545:19]
assign out_way_en = _ram_ext_R0_data[125:124]; // @[util.scala:503:22, :545:19]
assign out_sdq_id = _ram_ext_R0_data[130:126]; // @[util.scala:503:22, :545:19]
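  // Per-entry valid bits for the 15-entry queue (util.scala:504). The _T_4
  // aliases are the branch-kill update terms, which reduce to the valid bits
  // themselves because the kill conditions are constant false here.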
reg valids_0; // @[util.scala:504:26]
wire _valids_0_T_4 = valids_0; // @[util.scala:504:26, :520:31]
reg valids_1; // @[util.scala:504:26]
wire _valids_1_T_4 = valids_1; // @[util.scala:504:26, :520:31]
reg valids_2; // @[util.scala:504:26]
wire _valids_2_T_4 = valids_2; // @[util.scala:504:26, :520:31]
reg valids_3; // @[util.scala:504:26]
wire _valids_3_T_4 = valids_3; // @[util.scala:504:26, :520:31]
reg valids_4; // @[util.scala:504:26]
wire _valids_4_T_4 = valids_4; // @[util.scala:504:26, :520:31]
reg valids_5; // @[util.scala:504:26]
wire _valids_5_T_4 = valids_5; // @[util.scala:504:26, :520:31]
reg valids_6; // @[util.scala:504:26]
wire _valids_6_T_4 = valids_6; // @[util.scala:504:26, :520:31]
reg valids_7; // @[util.scala:504:26]
wire _valids_7_T_4 = valids_7; // @[util.scala:504:26, :520:31]
reg valids_8; // @[util.scala:504:26]
wire _valids_8_T_4 = valids_8; // @[util.scala:504:26, :520:31]
reg valids_9; // @[util.scala:504:26]
wire _valids_9_T_4 = valids_9; // @[util.scala:504:26, :520:31]
reg valids_10; // @[util.scala:504:26]
wire _valids_10_T_4 = valids_10; // @[util.scala:504:26, :520:31]
reg valids_11; // @[util.scala:504:26]
wire _valids_11_T_4 = valids_11; // @[util.scala:504:26, :520:31]
reg valids_12; // @[util.scala:504:26]
wire _valids_12_T_4 = valids_12; // @[util.scala:504:26, :520:31]
reg valids_13; // @[util.scala:504:26]
wire _valids_13_T_4 = valids_13; // @[util.scala:504:26, :520:31]
reg valids_14; // @[util.scala:504:26]
wire _valids_14_T_4 = valids_14; // @[util.scala:504:26, :520:31]
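  // Per-entry micro-op metadata registers (util.scala:505): one copy of the
  // full uop bundle per queue slot. The _uops_N_br_mask_T_1 aliases feed the
  // branch-mask update, which is a no-op in this instance (mask & 4'hF).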
reg [31:0] uops_0_inst; // @[util.scala:505:22]
reg [31:0] uops_0_debug_inst; // @[util.scala:505:22]
reg uops_0_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_0_debug_pc; // @[util.scala:505:22]
reg uops_0_iq_type_0; // @[util.scala:505:22]
reg uops_0_iq_type_1; // @[util.scala:505:22]
reg uops_0_iq_type_2; // @[util.scala:505:22]
reg uops_0_iq_type_3; // @[util.scala:505:22]
reg uops_0_fu_code_0; // @[util.scala:505:22]
reg uops_0_fu_code_1; // @[util.scala:505:22]
reg uops_0_fu_code_2; // @[util.scala:505:22]
reg uops_0_fu_code_3; // @[util.scala:505:22]
reg uops_0_fu_code_4; // @[util.scala:505:22]
reg uops_0_fu_code_5; // @[util.scala:505:22]
reg uops_0_fu_code_6; // @[util.scala:505:22]
reg uops_0_fu_code_7; // @[util.scala:505:22]
reg uops_0_fu_code_8; // @[util.scala:505:22]
reg uops_0_fu_code_9; // @[util.scala:505:22]
reg uops_0_iw_issued; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_0_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_0_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_0_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_0_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_0_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_0_br_mask_T_1 = uops_0_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_0_br_tag; // @[util.scala:505:22]
reg [3:0] uops_0_br_type; // @[util.scala:505:22]
reg uops_0_is_sfb; // @[util.scala:505:22]
reg uops_0_is_fence; // @[util.scala:505:22]
reg uops_0_is_fencei; // @[util.scala:505:22]
reg uops_0_is_sfence; // @[util.scala:505:22]
reg uops_0_is_amo; // @[util.scala:505:22]
reg uops_0_is_eret; // @[util.scala:505:22]
reg uops_0_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_0_is_rocc; // @[util.scala:505:22]
reg uops_0_is_mov; // @[util.scala:505:22]
reg [3:0] uops_0_ftq_idx; // @[util.scala:505:22]
reg uops_0_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_0_pc_lob; // @[util.scala:505:22]
reg uops_0_taken; // @[util.scala:505:22]
reg uops_0_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_0_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_0_pimm; // @[util.scala:505:22]
reg [19:0] uops_0_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_0_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_0_op2_sel; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_0_fp_ctrl_div; // @[util.scala:505:22]
reg uops_0_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_0_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_0_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_0_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_0_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_0_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_0_pdst; // @[util.scala:505:22]
reg [5:0] uops_0_prs1; // @[util.scala:505:22]
reg [5:0] uops_0_prs2; // @[util.scala:505:22]
reg [5:0] uops_0_prs3; // @[util.scala:505:22]
reg [3:0] uops_0_ppred; // @[util.scala:505:22]
reg uops_0_prs1_busy; // @[util.scala:505:22]
reg uops_0_prs2_busy; // @[util.scala:505:22]
reg uops_0_prs3_busy; // @[util.scala:505:22]
reg uops_0_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_0_stale_pdst; // @[util.scala:505:22]
reg uops_0_exception; // @[util.scala:505:22]
reg [63:0] uops_0_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_0_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_0_mem_size; // @[util.scala:505:22]
reg uops_0_mem_signed; // @[util.scala:505:22]
reg uops_0_uses_ldq; // @[util.scala:505:22]
reg uops_0_uses_stq; // @[util.scala:505:22]
reg uops_0_is_unique; // @[util.scala:505:22]
reg uops_0_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_0_csr_cmd; // @[util.scala:505:22]
reg uops_0_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_0_ldst; // @[util.scala:505:22]
reg [5:0] uops_0_lrs1; // @[util.scala:505:22]
reg [5:0] uops_0_lrs2; // @[util.scala:505:22]
reg [5:0] uops_0_lrs3; // @[util.scala:505:22]
reg [1:0] uops_0_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs2_rtype; // @[util.scala:505:22]
reg uops_0_frs3_en; // @[util.scala:505:22]
reg uops_0_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_0_fcn_op; // @[util.scala:505:22]
reg uops_0_fp_val; // @[util.scala:505:22]
reg [2:0] uops_0_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_0_fp_typ; // @[util.scala:505:22]
reg uops_0_xcpt_pf_if; // @[util.scala:505:22]
reg uops_0_xcpt_ae_if; // @[util.scala:505:22]
reg uops_0_xcpt_ma_if; // @[util.scala:505:22]
reg uops_0_bp_debug_if; // @[util.scala:505:22]
reg uops_0_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_0_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_0_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_1_inst; // @[util.scala:505:22]
reg [31:0] uops_1_debug_inst; // @[util.scala:505:22]
reg uops_1_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_1_debug_pc; // @[util.scala:505:22]
reg uops_1_iq_type_0; // @[util.scala:505:22]
reg uops_1_iq_type_1; // @[util.scala:505:22]
reg uops_1_iq_type_2; // @[util.scala:505:22]
reg uops_1_iq_type_3; // @[util.scala:505:22]
reg uops_1_fu_code_0; // @[util.scala:505:22]
reg uops_1_fu_code_1; // @[util.scala:505:22]
reg uops_1_fu_code_2; // @[util.scala:505:22]
reg uops_1_fu_code_3; // @[util.scala:505:22]
reg uops_1_fu_code_4; // @[util.scala:505:22]
reg uops_1_fu_code_5; // @[util.scala:505:22]
reg uops_1_fu_code_6; // @[util.scala:505:22]
reg uops_1_fu_code_7; // @[util.scala:505:22]
reg uops_1_fu_code_8; // @[util.scala:505:22]
reg uops_1_fu_code_9; // @[util.scala:505:22]
reg uops_1_iw_issued; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_1_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_1_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_1_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_1_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_1_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_1_br_mask_T_1 = uops_1_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_1_br_tag; // @[util.scala:505:22]
reg [3:0] uops_1_br_type; // @[util.scala:505:22]
reg uops_1_is_sfb; // @[util.scala:505:22]
reg uops_1_is_fence; // @[util.scala:505:22]
reg uops_1_is_fencei; // @[util.scala:505:22]
reg uops_1_is_sfence; // @[util.scala:505:22]
reg uops_1_is_amo; // @[util.scala:505:22]
reg uops_1_is_eret; // @[util.scala:505:22]
reg uops_1_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_1_is_rocc; // @[util.scala:505:22]
reg uops_1_is_mov; // @[util.scala:505:22]
reg [3:0] uops_1_ftq_idx; // @[util.scala:505:22]
reg uops_1_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_1_pc_lob; // @[util.scala:505:22]
reg uops_1_taken; // @[util.scala:505:22]
reg uops_1_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_1_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_1_pimm; // @[util.scala:505:22]
reg [19:0] uops_1_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_1_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_1_op2_sel; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_1_fp_ctrl_div; // @[util.scala:505:22]
reg uops_1_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_1_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_1_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_1_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_1_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_1_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_1_pdst; // @[util.scala:505:22]
reg [5:0] uops_1_prs1; // @[util.scala:505:22]
reg [5:0] uops_1_prs2; // @[util.scala:505:22]
reg [5:0] uops_1_prs3; // @[util.scala:505:22]
reg [3:0] uops_1_ppred; // @[util.scala:505:22]
reg uops_1_prs1_busy; // @[util.scala:505:22]
reg uops_1_prs2_busy; // @[util.scala:505:22]
reg uops_1_prs3_busy; // @[util.scala:505:22]
reg uops_1_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_1_stale_pdst; // @[util.scala:505:22]
reg uops_1_exception; // @[util.scala:505:22]
reg [63:0] uops_1_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_1_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_1_mem_size; // @[util.scala:505:22]
reg uops_1_mem_signed; // @[util.scala:505:22]
reg uops_1_uses_ldq; // @[util.scala:505:22]
reg uops_1_uses_stq; // @[util.scala:505:22]
reg uops_1_is_unique; // @[util.scala:505:22]
reg uops_1_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_1_csr_cmd; // @[util.scala:505:22]
reg uops_1_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_1_ldst; // @[util.scala:505:22]
reg [5:0] uops_1_lrs1; // @[util.scala:505:22]
reg [5:0] uops_1_lrs2; // @[util.scala:505:22]
reg [5:0] uops_1_lrs3; // @[util.scala:505:22]
reg [1:0] uops_1_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs2_rtype; // @[util.scala:505:22]
reg uops_1_frs3_en; // @[util.scala:505:22]
reg uops_1_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_1_fcn_op; // @[util.scala:505:22]
reg uops_1_fp_val; // @[util.scala:505:22]
reg [2:0] uops_1_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_1_fp_typ; // @[util.scala:505:22]
reg uops_1_xcpt_pf_if; // @[util.scala:505:22]
reg uops_1_xcpt_ae_if; // @[util.scala:505:22]
reg uops_1_xcpt_ma_if; // @[util.scala:505:22]
reg uops_1_bp_debug_if; // @[util.scala:505:22]
reg uops_1_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_1_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_1_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_2_inst; // @[util.scala:505:22]
reg [31:0] uops_2_debug_inst; // @[util.scala:505:22]
reg uops_2_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_2_debug_pc; // @[util.scala:505:22]
reg uops_2_iq_type_0; // @[util.scala:505:22]
reg uops_2_iq_type_1; // @[util.scala:505:22]
reg uops_2_iq_type_2; // @[util.scala:505:22]
reg uops_2_iq_type_3; // @[util.scala:505:22]
reg uops_2_fu_code_0; // @[util.scala:505:22]
reg uops_2_fu_code_1; // @[util.scala:505:22]
reg uops_2_fu_code_2; // @[util.scala:505:22]
reg uops_2_fu_code_3; // @[util.scala:505:22]
reg uops_2_fu_code_4; // @[util.scala:505:22]
reg uops_2_fu_code_5; // @[util.scala:505:22]
reg uops_2_fu_code_6; // @[util.scala:505:22]
reg uops_2_fu_code_7; // @[util.scala:505:22]
reg uops_2_fu_code_8; // @[util.scala:505:22]
reg uops_2_fu_code_9; // @[util.scala:505:22]
reg uops_2_iw_issued; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_2_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_2_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_2_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_2_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_2_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_2_br_mask_T_1 = uops_2_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_2_br_tag; // @[util.scala:505:22]
reg [3:0] uops_2_br_type; // @[util.scala:505:22]
reg uops_2_is_sfb; // @[util.scala:505:22]
reg uops_2_is_fence; // @[util.scala:505:22]
reg uops_2_is_fencei; // @[util.scala:505:22]
reg uops_2_is_sfence; // @[util.scala:505:22]
reg uops_2_is_amo; // @[util.scala:505:22]
reg uops_2_is_eret; // @[util.scala:505:22]
reg uops_2_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_2_is_rocc; // @[util.scala:505:22]
reg uops_2_is_mov; // @[util.scala:505:22]
reg [3:0] uops_2_ftq_idx; // @[util.scala:505:22]
reg uops_2_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_2_pc_lob; // @[util.scala:505:22]
reg uops_2_taken; // @[util.scala:505:22]
reg uops_2_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_2_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_2_pimm; // @[util.scala:505:22]
reg [19:0] uops_2_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_2_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_2_op2_sel; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_2_fp_ctrl_div; // @[util.scala:505:22]
reg uops_2_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_2_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_2_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_2_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_2_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_2_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_2_pdst; // @[util.scala:505:22]
reg [5:0] uops_2_prs1; // @[util.scala:505:22]
reg [5:0] uops_2_prs2; // @[util.scala:505:22]
reg [5:0] uops_2_prs3; // @[util.scala:505:22]
reg [3:0] uops_2_ppred; // @[util.scala:505:22]
reg uops_2_prs1_busy; // @[util.scala:505:22]
reg uops_2_prs2_busy; // @[util.scala:505:22]
reg uops_2_prs3_busy; // @[util.scala:505:22]
reg uops_2_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_2_stale_pdst; // @[util.scala:505:22]
reg uops_2_exception; // @[util.scala:505:22]
reg [63:0] uops_2_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_2_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_2_mem_size; // @[util.scala:505:22]
reg uops_2_mem_signed; // @[util.scala:505:22]
reg uops_2_uses_ldq; // @[util.scala:505:22]
reg uops_2_uses_stq; // @[util.scala:505:22]
reg uops_2_is_unique; // @[util.scala:505:22]
reg uops_2_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_2_csr_cmd; // @[util.scala:505:22]
reg uops_2_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_2_ldst; // @[util.scala:505:22]
reg [5:0] uops_2_lrs1; // @[util.scala:505:22]
reg [5:0] uops_2_lrs2; // @[util.scala:505:22]
reg [5:0] uops_2_lrs3; // @[util.scala:505:22]
reg [1:0] uops_2_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs2_rtype; // @[util.scala:505:22]
reg uops_2_frs3_en; // @[util.scala:505:22]
reg uops_2_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_2_fcn_op; // @[util.scala:505:22]
reg uops_2_fp_val; // @[util.scala:505:22]
reg [2:0] uops_2_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_2_fp_typ; // @[util.scala:505:22]
reg uops_2_xcpt_pf_if; // @[util.scala:505:22]
reg uops_2_xcpt_ae_if; // @[util.scala:505:22]
reg uops_2_xcpt_ma_if; // @[util.scala:505:22]
reg uops_2_bp_debug_if; // @[util.scala:505:22]
reg uops_2_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_2_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_2_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_3_inst; // @[util.scala:505:22]
reg [31:0] uops_3_debug_inst; // @[util.scala:505:22]
reg uops_3_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_3_debug_pc; // @[util.scala:505:22]
reg uops_3_iq_type_0; // @[util.scala:505:22]
reg uops_3_iq_type_1; // @[util.scala:505:22]
reg uops_3_iq_type_2; // @[util.scala:505:22]
reg uops_3_iq_type_3; // @[util.scala:505:22]
reg uops_3_fu_code_0; // @[util.scala:505:22]
reg uops_3_fu_code_1; // @[util.scala:505:22]
reg uops_3_fu_code_2; // @[util.scala:505:22]
reg uops_3_fu_code_3; // @[util.scala:505:22]
reg uops_3_fu_code_4; // @[util.scala:505:22]
reg uops_3_fu_code_5; // @[util.scala:505:22]
reg uops_3_fu_code_6; // @[util.scala:505:22]
reg uops_3_fu_code_7; // @[util.scala:505:22]
reg uops_3_fu_code_8; // @[util.scala:505:22]
reg uops_3_fu_code_9; // @[util.scala:505:22]
reg uops_3_iw_issued; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_3_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_3_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_3_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_3_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_3_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_3_br_mask_T_1 = uops_3_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_3_br_tag; // @[util.scala:505:22]
reg [3:0] uops_3_br_type; // @[util.scala:505:22]
reg uops_3_is_sfb; // @[util.scala:505:22]
reg uops_3_is_fence; // @[util.scala:505:22]
reg uops_3_is_fencei; // @[util.scala:505:22]
reg uops_3_is_sfence; // @[util.scala:505:22]
reg uops_3_is_amo; // @[util.scala:505:22]
reg uops_3_is_eret; // @[util.scala:505:22]
reg uops_3_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_3_is_rocc; // @[util.scala:505:22]
reg uops_3_is_mov; // @[util.scala:505:22]
reg [3:0] uops_3_ftq_idx; // @[util.scala:505:22]
reg uops_3_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_3_pc_lob; // @[util.scala:505:22]
reg uops_3_taken; // @[util.scala:505:22]
reg uops_3_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_3_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_3_pimm; // @[util.scala:505:22]
reg [19:0] uops_3_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_3_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_3_op2_sel; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_3_fp_ctrl_div; // @[util.scala:505:22]
reg uops_3_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_3_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_3_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_3_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_3_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_3_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_3_pdst; // @[util.scala:505:22]
reg [5:0] uops_3_prs1; // @[util.scala:505:22]
reg [5:0] uops_3_prs2; // @[util.scala:505:22]
reg [5:0] uops_3_prs3; // @[util.scala:505:22]
reg [3:0] uops_3_ppred; // @[util.scala:505:22]
reg uops_3_prs1_busy; // @[util.scala:505:22]
reg uops_3_prs2_busy; // @[util.scala:505:22]
reg uops_3_prs3_busy; // @[util.scala:505:22]
reg uops_3_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_3_stale_pdst; // @[util.scala:505:22]
reg uops_3_exception; // @[util.scala:505:22]
reg [63:0] uops_3_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_3_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_3_mem_size; // @[util.scala:505:22]
reg uops_3_mem_signed; // @[util.scala:505:22]
reg uops_3_uses_ldq; // @[util.scala:505:22]
reg uops_3_uses_stq; // @[util.scala:505:22]
reg uops_3_is_unique; // @[util.scala:505:22]
reg uops_3_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_3_csr_cmd; // @[util.scala:505:22]
reg uops_3_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_3_ldst; // @[util.scala:505:22]
reg [5:0] uops_3_lrs1; // @[util.scala:505:22]
reg [5:0] uops_3_lrs2; // @[util.scala:505:22]
reg [5:0] uops_3_lrs3; // @[util.scala:505:22]
reg [1:0] uops_3_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs2_rtype; // @[util.scala:505:22]
reg uops_3_frs3_en; // @[util.scala:505:22]
reg uops_3_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_3_fcn_op; // @[util.scala:505:22]
reg uops_3_fp_val; // @[util.scala:505:22]
reg [2:0] uops_3_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_3_fp_typ; // @[util.scala:505:22]
reg uops_3_xcpt_pf_if; // @[util.scala:505:22]
reg uops_3_xcpt_ae_if; // @[util.scala:505:22]
reg uops_3_xcpt_ma_if; // @[util.scala:505:22]
reg uops_3_bp_debug_if; // @[util.scala:505:22]
reg uops_3_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_3_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_3_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_4_inst; // @[util.scala:505:22]
reg [31:0] uops_4_debug_inst; // @[util.scala:505:22]
reg uops_4_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_4_debug_pc; // @[util.scala:505:22]
reg uops_4_iq_type_0; // @[util.scala:505:22]
reg uops_4_iq_type_1; // @[util.scala:505:22]
reg uops_4_iq_type_2; // @[util.scala:505:22]
reg uops_4_iq_type_3; // @[util.scala:505:22]
reg uops_4_fu_code_0; // @[util.scala:505:22]
reg uops_4_fu_code_1; // @[util.scala:505:22]
reg uops_4_fu_code_2; // @[util.scala:505:22]
reg uops_4_fu_code_3; // @[util.scala:505:22]
reg uops_4_fu_code_4; // @[util.scala:505:22]
reg uops_4_fu_code_5; // @[util.scala:505:22]
reg uops_4_fu_code_6; // @[util.scala:505:22]
reg uops_4_fu_code_7; // @[util.scala:505:22]
reg uops_4_fu_code_8; // @[util.scala:505:22]
reg uops_4_fu_code_9; // @[util.scala:505:22]
reg uops_4_iw_issued; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_4_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_4_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_4_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_4_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_4_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_4_br_mask_T_1 = uops_4_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_4_br_tag; // @[util.scala:505:22]
reg [3:0] uops_4_br_type; // @[util.scala:505:22]
reg uops_4_is_sfb; // @[util.scala:505:22]
reg uops_4_is_fence; // @[util.scala:505:22]
reg uops_4_is_fencei; // @[util.scala:505:22]
reg uops_4_is_sfence; // @[util.scala:505:22]
reg uops_4_is_amo; // @[util.scala:505:22]
reg uops_4_is_eret; // @[util.scala:505:22]
reg uops_4_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_4_is_rocc; // @[util.scala:505:22]
reg uops_4_is_mov; // @[util.scala:505:22]
reg [3:0] uops_4_ftq_idx; // @[util.scala:505:22]
reg uops_4_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_4_pc_lob; // @[util.scala:505:22]
reg uops_4_taken; // @[util.scala:505:22]
reg uops_4_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_4_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_4_pimm; // @[util.scala:505:22]
reg [19:0] uops_4_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_4_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_4_op2_sel; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_4_fp_ctrl_div; // @[util.scala:505:22]
reg uops_4_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_4_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_4_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_4_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_4_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_4_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_4_pdst; // @[util.scala:505:22]
reg [5:0] uops_4_prs1; // @[util.scala:505:22]
reg [5:0] uops_4_prs2; // @[util.scala:505:22]
reg [5:0] uops_4_prs3; // @[util.scala:505:22]
reg [3:0] uops_4_ppred; // @[util.scala:505:22]
reg uops_4_prs1_busy; // @[util.scala:505:22]
reg uops_4_prs2_busy; // @[util.scala:505:22]
reg uops_4_prs3_busy; // @[util.scala:505:22]
reg uops_4_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_4_stale_pdst; // @[util.scala:505:22]
reg uops_4_exception; // @[util.scala:505:22]
reg [63:0] uops_4_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_4_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_4_mem_size; // @[util.scala:505:22]
reg uops_4_mem_signed; // @[util.scala:505:22]
reg uops_4_uses_ldq; // @[util.scala:505:22]
reg uops_4_uses_stq; // @[util.scala:505:22]
reg uops_4_is_unique; // @[util.scala:505:22]
reg uops_4_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_4_csr_cmd; // @[util.scala:505:22]
reg uops_4_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_4_ldst; // @[util.scala:505:22]
reg [5:0] uops_4_lrs1; // @[util.scala:505:22]
reg [5:0] uops_4_lrs2; // @[util.scala:505:22]
reg [5:0] uops_4_lrs3; // @[util.scala:505:22]
reg [1:0] uops_4_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs2_rtype; // @[util.scala:505:22]
reg uops_4_frs3_en; // @[util.scala:505:22]
reg uops_4_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_4_fcn_op; // @[util.scala:505:22]
reg uops_4_fp_val; // @[util.scala:505:22]
reg [2:0] uops_4_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_4_fp_typ; // @[util.scala:505:22]
reg uops_4_xcpt_pf_if; // @[util.scala:505:22]
reg uops_4_xcpt_ae_if; // @[util.scala:505:22]
reg uops_4_xcpt_ma_if; // @[util.scala:505:22]
reg uops_4_bp_debug_if; // @[util.scala:505:22]
reg uops_4_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_4_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_4_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_5_inst; // @[util.scala:505:22]
reg [31:0] uops_5_debug_inst; // @[util.scala:505:22]
reg uops_5_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_5_debug_pc; // @[util.scala:505:22]
reg uops_5_iq_type_0; // @[util.scala:505:22]
reg uops_5_iq_type_1; // @[util.scala:505:22]
reg uops_5_iq_type_2; // @[util.scala:505:22]
reg uops_5_iq_type_3; // @[util.scala:505:22]
reg uops_5_fu_code_0; // @[util.scala:505:22]
reg uops_5_fu_code_1; // @[util.scala:505:22]
reg uops_5_fu_code_2; // @[util.scala:505:22]
reg uops_5_fu_code_3; // @[util.scala:505:22]
reg uops_5_fu_code_4; // @[util.scala:505:22]
reg uops_5_fu_code_5; // @[util.scala:505:22]
reg uops_5_fu_code_6; // @[util.scala:505:22]
reg uops_5_fu_code_7; // @[util.scala:505:22]
reg uops_5_fu_code_8; // @[util.scala:505:22]
reg uops_5_fu_code_9; // @[util.scala:505:22]
reg uops_5_iw_issued; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_5_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_5_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_5_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_5_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_5_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_5_br_mask_T_1 = uops_5_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_5_br_tag; // @[util.scala:505:22]
reg [3:0] uops_5_br_type; // @[util.scala:505:22]
reg uops_5_is_sfb; // @[util.scala:505:22]
reg uops_5_is_fence; // @[util.scala:505:22]
reg uops_5_is_fencei; // @[util.scala:505:22]
reg uops_5_is_sfence; // @[util.scala:505:22]
reg uops_5_is_amo; // @[util.scala:505:22]
reg uops_5_is_eret; // @[util.scala:505:22]
reg uops_5_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_5_is_rocc; // @[util.scala:505:22]
reg uops_5_is_mov; // @[util.scala:505:22]
reg [3:0] uops_5_ftq_idx; // @[util.scala:505:22]
reg uops_5_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_5_pc_lob; // @[util.scala:505:22]
reg uops_5_taken; // @[util.scala:505:22]
reg uops_5_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_5_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_5_pimm; // @[util.scala:505:22]
reg [19:0] uops_5_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_5_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_5_op2_sel; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_5_fp_ctrl_div; // @[util.scala:505:22]
reg uops_5_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_5_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_5_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_5_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_5_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_5_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_5_pdst; // @[util.scala:505:22]
reg [5:0] uops_5_prs1; // @[util.scala:505:22]
reg [5:0] uops_5_prs2; // @[util.scala:505:22]
reg [5:0] uops_5_prs3; // @[util.scala:505:22]
reg [3:0] uops_5_ppred; // @[util.scala:505:22]
reg uops_5_prs1_busy; // @[util.scala:505:22]
reg uops_5_prs2_busy; // @[util.scala:505:22]
reg uops_5_prs3_busy; // @[util.scala:505:22]
reg uops_5_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_5_stale_pdst; // @[util.scala:505:22]
reg uops_5_exception; // @[util.scala:505:22]
reg [63:0] uops_5_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_5_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_5_mem_size; // @[util.scala:505:22]
reg uops_5_mem_signed; // @[util.scala:505:22]
reg uops_5_uses_ldq; // @[util.scala:505:22]
reg uops_5_uses_stq; // @[util.scala:505:22]
reg uops_5_is_unique; // @[util.scala:505:22]
reg uops_5_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_5_csr_cmd; // @[util.scala:505:22]
reg uops_5_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_5_ldst; // @[util.scala:505:22]
reg [5:0] uops_5_lrs1; // @[util.scala:505:22]
reg [5:0] uops_5_lrs2; // @[util.scala:505:22]
reg [5:0] uops_5_lrs3; // @[util.scala:505:22]
reg [1:0] uops_5_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs2_rtype; // @[util.scala:505:22]
reg uops_5_frs3_en; // @[util.scala:505:22]
reg uops_5_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_5_fcn_op; // @[util.scala:505:22]
reg uops_5_fp_val; // @[util.scala:505:22]
reg [2:0] uops_5_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_5_fp_typ; // @[util.scala:505:22]
reg uops_5_xcpt_pf_if; // @[util.scala:505:22]
reg uops_5_xcpt_ae_if; // @[util.scala:505:22]
reg uops_5_xcpt_ma_if; // @[util.scala:505:22]
reg uops_5_bp_debug_if; // @[util.scala:505:22]
reg uops_5_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_5_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_5_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_6_inst; // @[util.scala:505:22]
reg [31:0] uops_6_debug_inst; // @[util.scala:505:22]
reg uops_6_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_6_debug_pc; // @[util.scala:505:22]
reg uops_6_iq_type_0; // @[util.scala:505:22]
reg uops_6_iq_type_1; // @[util.scala:505:22]
reg uops_6_iq_type_2; // @[util.scala:505:22]
reg uops_6_iq_type_3; // @[util.scala:505:22]
reg uops_6_fu_code_0; // @[util.scala:505:22]
reg uops_6_fu_code_1; // @[util.scala:505:22]
reg uops_6_fu_code_2; // @[util.scala:505:22]
reg uops_6_fu_code_3; // @[util.scala:505:22]
reg uops_6_fu_code_4; // @[util.scala:505:22]
reg uops_6_fu_code_5; // @[util.scala:505:22]
reg uops_6_fu_code_6; // @[util.scala:505:22]
reg uops_6_fu_code_7; // @[util.scala:505:22]
reg uops_6_fu_code_8; // @[util.scala:505:22]
reg uops_6_fu_code_9; // @[util.scala:505:22]
reg uops_6_iw_issued; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_6_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_6_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_6_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_6_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_6_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_6_br_mask_T_1 = uops_6_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_6_br_tag; // @[util.scala:505:22]
reg [3:0] uops_6_br_type; // @[util.scala:505:22]
reg uops_6_is_sfb; // @[util.scala:505:22]
reg uops_6_is_fence; // @[util.scala:505:22]
reg uops_6_is_fencei; // @[util.scala:505:22]
reg uops_6_is_sfence; // @[util.scala:505:22]
reg uops_6_is_amo; // @[util.scala:505:22]
reg uops_6_is_eret; // @[util.scala:505:22]
reg uops_6_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_6_is_rocc; // @[util.scala:505:22]
reg uops_6_is_mov; // @[util.scala:505:22]
reg [3:0] uops_6_ftq_idx; // @[util.scala:505:22]
reg uops_6_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_6_pc_lob; // @[util.scala:505:22]
reg uops_6_taken; // @[util.scala:505:22]
reg uops_6_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_6_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_6_pimm; // @[util.scala:505:22]
reg [19:0] uops_6_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_6_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_6_op2_sel; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_6_fp_ctrl_div; // @[util.scala:505:22]
reg uops_6_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_6_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_6_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_6_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_6_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_6_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_6_pdst; // @[util.scala:505:22]
reg [5:0] uops_6_prs1; // @[util.scala:505:22]
reg [5:0] uops_6_prs2; // @[util.scala:505:22]
reg [5:0] uops_6_prs3; // @[util.scala:505:22]
reg [3:0] uops_6_ppred; // @[util.scala:505:22]
reg uops_6_prs1_busy; // @[util.scala:505:22]
reg uops_6_prs2_busy; // @[util.scala:505:22]
reg uops_6_prs3_busy; // @[util.scala:505:22]
reg uops_6_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_6_stale_pdst; // @[util.scala:505:22]
reg uops_6_exception; // @[util.scala:505:22]
reg [63:0] uops_6_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_6_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_6_mem_size; // @[util.scala:505:22]
reg uops_6_mem_signed; // @[util.scala:505:22]
reg uops_6_uses_ldq; // @[util.scala:505:22]
reg uops_6_uses_stq; // @[util.scala:505:22]
reg uops_6_is_unique; // @[util.scala:505:22]
reg uops_6_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_6_csr_cmd; // @[util.scala:505:22]
reg uops_6_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_6_ldst; // @[util.scala:505:22]
reg [5:0] uops_6_lrs1; // @[util.scala:505:22]
reg [5:0] uops_6_lrs2; // @[util.scala:505:22]
reg [5:0] uops_6_lrs3; // @[util.scala:505:22]
reg [1:0] uops_6_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs2_rtype; // @[util.scala:505:22]
reg uops_6_frs3_en; // @[util.scala:505:22]
reg uops_6_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_6_fcn_op; // @[util.scala:505:22]
reg uops_6_fp_val; // @[util.scala:505:22]
reg [2:0] uops_6_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_6_fp_typ; // @[util.scala:505:22]
reg uops_6_xcpt_pf_if; // @[util.scala:505:22]
reg uops_6_xcpt_ae_if; // @[util.scala:505:22]
reg uops_6_xcpt_ma_if; // @[util.scala:505:22]
reg uops_6_bp_debug_if; // @[util.scala:505:22]
reg uops_6_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_6_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_6_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_7_inst; // @[util.scala:505:22]
reg [31:0] uops_7_debug_inst; // @[util.scala:505:22]
reg uops_7_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_7_debug_pc; // @[util.scala:505:22]
reg uops_7_iq_type_0; // @[util.scala:505:22]
reg uops_7_iq_type_1; // @[util.scala:505:22]
reg uops_7_iq_type_2; // @[util.scala:505:22]
reg uops_7_iq_type_3; // @[util.scala:505:22]
reg uops_7_fu_code_0; // @[util.scala:505:22]
reg uops_7_fu_code_1; // @[util.scala:505:22]
reg uops_7_fu_code_2; // @[util.scala:505:22]
reg uops_7_fu_code_3; // @[util.scala:505:22]
reg uops_7_fu_code_4; // @[util.scala:505:22]
reg uops_7_fu_code_5; // @[util.scala:505:22]
reg uops_7_fu_code_6; // @[util.scala:505:22]
reg uops_7_fu_code_7; // @[util.scala:505:22]
reg uops_7_fu_code_8; // @[util.scala:505:22]
reg uops_7_fu_code_9; // @[util.scala:505:22]
reg uops_7_iw_issued; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_7_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_7_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_7_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_7_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_7_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_7_br_mask_T_1 = uops_7_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_7_br_tag; // @[util.scala:505:22]
reg [3:0] uops_7_br_type; // @[util.scala:505:22]
reg uops_7_is_sfb; // @[util.scala:505:22]
reg uops_7_is_fence; // @[util.scala:505:22]
reg uops_7_is_fencei; // @[util.scala:505:22]
reg uops_7_is_sfence; // @[util.scala:505:22]
reg uops_7_is_amo; // @[util.scala:505:22]
reg uops_7_is_eret; // @[util.scala:505:22]
reg uops_7_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_7_is_rocc; // @[util.scala:505:22]
reg uops_7_is_mov; // @[util.scala:505:22]
reg [3:0] uops_7_ftq_idx; // @[util.scala:505:22]
reg uops_7_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_7_pc_lob; // @[util.scala:505:22]
reg uops_7_taken; // @[util.scala:505:22]
reg uops_7_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_7_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_7_pimm; // @[util.scala:505:22]
reg [19:0] uops_7_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_7_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_7_op2_sel; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_7_fp_ctrl_div; // @[util.scala:505:22]
reg uops_7_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_7_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_7_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_7_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_7_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_7_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_7_pdst; // @[util.scala:505:22]
reg [5:0] uops_7_prs1; // @[util.scala:505:22]
reg [5:0] uops_7_prs2; // @[util.scala:505:22]
reg [5:0] uops_7_prs3; // @[util.scala:505:22]
reg [3:0] uops_7_ppred; // @[util.scala:505:22]
reg uops_7_prs1_busy; // @[util.scala:505:22]
reg uops_7_prs2_busy; // @[util.scala:505:22]
reg uops_7_prs3_busy; // @[util.scala:505:22]
reg uops_7_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_7_stale_pdst; // @[util.scala:505:22]
reg uops_7_exception; // @[util.scala:505:22]
reg [63:0] uops_7_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_7_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_7_mem_size; // @[util.scala:505:22]
reg uops_7_mem_signed; // @[util.scala:505:22]
reg uops_7_uses_ldq; // @[util.scala:505:22]
reg uops_7_uses_stq; // @[util.scala:505:22]
reg uops_7_is_unique; // @[util.scala:505:22]
reg uops_7_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_7_csr_cmd; // @[util.scala:505:22]
reg uops_7_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_7_ldst; // @[util.scala:505:22]
reg [5:0] uops_7_lrs1; // @[util.scala:505:22]
reg [5:0] uops_7_lrs2; // @[util.scala:505:22]
reg [5:0] uops_7_lrs3; // @[util.scala:505:22]
reg [1:0] uops_7_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs2_rtype; // @[util.scala:505:22]
reg uops_7_frs3_en; // @[util.scala:505:22]
reg uops_7_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_7_fcn_op; // @[util.scala:505:22]
reg uops_7_fp_val; // @[util.scala:505:22]
reg [2:0] uops_7_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_7_fp_typ; // @[util.scala:505:22]
reg uops_7_xcpt_pf_if; // @[util.scala:505:22]
reg uops_7_xcpt_ae_if; // @[util.scala:505:22]
reg uops_7_xcpt_ma_if; // @[util.scala:505:22]
reg uops_7_bp_debug_if; // @[util.scala:505:22]
reg uops_7_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_7_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_7_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_8_inst; // @[util.scala:505:22]
reg [31:0] uops_8_debug_inst; // @[util.scala:505:22]
reg uops_8_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_8_debug_pc; // @[util.scala:505:22]
reg uops_8_iq_type_0; // @[util.scala:505:22]
reg uops_8_iq_type_1; // @[util.scala:505:22]
reg uops_8_iq_type_2; // @[util.scala:505:22]
reg uops_8_iq_type_3; // @[util.scala:505:22]
reg uops_8_fu_code_0; // @[util.scala:505:22]
reg uops_8_fu_code_1; // @[util.scala:505:22]
reg uops_8_fu_code_2; // @[util.scala:505:22]
reg uops_8_fu_code_3; // @[util.scala:505:22]
reg uops_8_fu_code_4; // @[util.scala:505:22]
reg uops_8_fu_code_5; // @[util.scala:505:22]
reg uops_8_fu_code_6; // @[util.scala:505:22]
reg uops_8_fu_code_7; // @[util.scala:505:22]
reg uops_8_fu_code_8; // @[util.scala:505:22]
reg uops_8_fu_code_9; // @[util.scala:505:22]
reg uops_8_iw_issued; // @[util.scala:505:22]
reg uops_8_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_8_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_8_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_8_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_8_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_8_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_8_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_8_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_8_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_8_br_mask_T_1 = uops_8_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_8_br_tag; // @[util.scala:505:22]
reg [3:0] uops_8_br_type; // @[util.scala:505:22]
reg uops_8_is_sfb; // @[util.scala:505:22]
reg uops_8_is_fence; // @[util.scala:505:22]
reg uops_8_is_fencei; // @[util.scala:505:22]
reg uops_8_is_sfence; // @[util.scala:505:22]
reg uops_8_is_amo; // @[util.scala:505:22]
reg uops_8_is_eret; // @[util.scala:505:22]
reg uops_8_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_8_is_rocc; // @[util.scala:505:22]
reg uops_8_is_mov; // @[util.scala:505:22]
reg [3:0] uops_8_ftq_idx; // @[util.scala:505:22]
reg uops_8_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_8_pc_lob; // @[util.scala:505:22]
reg uops_8_taken; // @[util.scala:505:22]
reg uops_8_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_8_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_8_pimm; // @[util.scala:505:22]
reg [19:0] uops_8_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_8_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_8_op2_sel; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_8_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_8_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_8_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_8_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_8_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_8_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_8_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_8_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_8_fp_ctrl_div; // @[util.scala:505:22]
reg uops_8_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_8_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_8_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_8_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_8_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_8_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_8_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_8_pdst; // @[util.scala:505:22]
reg [5:0] uops_8_prs1; // @[util.scala:505:22]
reg [5:0] uops_8_prs2; // @[util.scala:505:22]
reg [5:0] uops_8_prs3; // @[util.scala:505:22]
reg [3:0] uops_8_ppred; // @[util.scala:505:22]
reg uops_8_prs1_busy; // @[util.scala:505:22]
reg uops_8_prs2_busy; // @[util.scala:505:22]
reg uops_8_prs3_busy; // @[util.scala:505:22]
reg uops_8_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_8_stale_pdst; // @[util.scala:505:22]
reg uops_8_exception; // @[util.scala:505:22]
reg [63:0] uops_8_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_8_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_8_mem_size; // @[util.scala:505:22]
reg uops_8_mem_signed; // @[util.scala:505:22]
reg uops_8_uses_ldq; // @[util.scala:505:22]
reg uops_8_uses_stq; // @[util.scala:505:22]
reg uops_8_is_unique; // @[util.scala:505:22]
reg uops_8_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_8_csr_cmd; // @[util.scala:505:22]
reg uops_8_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_8_ldst; // @[util.scala:505:22]
reg [5:0] uops_8_lrs1; // @[util.scala:505:22]
reg [5:0] uops_8_lrs2; // @[util.scala:505:22]
reg [5:0] uops_8_lrs3; // @[util.scala:505:22]
reg [1:0] uops_8_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_8_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_8_lrs2_rtype; // @[util.scala:505:22]
reg uops_8_frs3_en; // @[util.scala:505:22]
reg uops_8_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_8_fcn_op; // @[util.scala:505:22]
reg uops_8_fp_val; // @[util.scala:505:22]
reg [2:0] uops_8_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_8_fp_typ; // @[util.scala:505:22]
reg uops_8_xcpt_pf_if; // @[util.scala:505:22]
reg uops_8_xcpt_ae_if; // @[util.scala:505:22]
reg uops_8_xcpt_ma_if; // @[util.scala:505:22]
reg uops_8_bp_debug_if; // @[util.scala:505:22]
reg uops_8_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_8_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_8_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_9_inst; // @[util.scala:505:22]
reg [31:0] uops_9_debug_inst; // @[util.scala:505:22]
reg uops_9_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_9_debug_pc; // @[util.scala:505:22]
reg uops_9_iq_type_0; // @[util.scala:505:22]
reg uops_9_iq_type_1; // @[util.scala:505:22]
reg uops_9_iq_type_2; // @[util.scala:505:22]
reg uops_9_iq_type_3; // @[util.scala:505:22]
reg uops_9_fu_code_0; // @[util.scala:505:22]
reg uops_9_fu_code_1; // @[util.scala:505:22]
reg uops_9_fu_code_2; // @[util.scala:505:22]
reg uops_9_fu_code_3; // @[util.scala:505:22]
reg uops_9_fu_code_4; // @[util.scala:505:22]
reg uops_9_fu_code_5; // @[util.scala:505:22]
reg uops_9_fu_code_6; // @[util.scala:505:22]
reg uops_9_fu_code_7; // @[util.scala:505:22]
reg uops_9_fu_code_8; // @[util.scala:505:22]
reg uops_9_fu_code_9; // @[util.scala:505:22]
reg uops_9_iw_issued; // @[util.scala:505:22]
reg uops_9_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_9_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_9_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_9_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_9_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_9_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_9_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_9_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_9_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_9_br_mask_T_1 = uops_9_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_9_br_tag; // @[util.scala:505:22]
reg [3:0] uops_9_br_type; // @[util.scala:505:22]
reg uops_9_is_sfb; // @[util.scala:505:22]
reg uops_9_is_fence; // @[util.scala:505:22]
reg uops_9_is_fencei; // @[util.scala:505:22]
reg uops_9_is_sfence; // @[util.scala:505:22]
reg uops_9_is_amo; // @[util.scala:505:22]
reg uops_9_is_eret; // @[util.scala:505:22]
reg uops_9_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_9_is_rocc; // @[util.scala:505:22]
reg uops_9_is_mov; // @[util.scala:505:22]
reg [3:0] uops_9_ftq_idx; // @[util.scala:505:22]
reg uops_9_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_9_pc_lob; // @[util.scala:505:22]
reg uops_9_taken; // @[util.scala:505:22]
reg uops_9_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_9_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_9_pimm; // @[util.scala:505:22]
reg [19:0] uops_9_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_9_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_9_op2_sel; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_9_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_9_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_9_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_9_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_9_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_9_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_9_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_9_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_9_fp_ctrl_div; // @[util.scala:505:22]
reg uops_9_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_9_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_9_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_9_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_9_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_9_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_9_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_9_pdst; // @[util.scala:505:22]
reg [5:0] uops_9_prs1; // @[util.scala:505:22]
reg [5:0] uops_9_prs2; // @[util.scala:505:22]
reg [5:0] uops_9_prs3; // @[util.scala:505:22]
reg [3:0] uops_9_ppred; // @[util.scala:505:22]
reg uops_9_prs1_busy; // @[util.scala:505:22]
reg uops_9_prs2_busy; // @[util.scala:505:22]
reg uops_9_prs3_busy; // @[util.scala:505:22]
reg uops_9_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_9_stale_pdst; // @[util.scala:505:22]
reg uops_9_exception; // @[util.scala:505:22]
reg [63:0] uops_9_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_9_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_9_mem_size; // @[util.scala:505:22]
reg uops_9_mem_signed; // @[util.scala:505:22]
reg uops_9_uses_ldq; // @[util.scala:505:22]
reg uops_9_uses_stq; // @[util.scala:505:22]
reg uops_9_is_unique; // @[util.scala:505:22]
reg uops_9_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_9_csr_cmd; // @[util.scala:505:22]
reg uops_9_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_9_ldst; // @[util.scala:505:22]
reg [5:0] uops_9_lrs1; // @[util.scala:505:22]
reg [5:0] uops_9_lrs2; // @[util.scala:505:22]
reg [5:0] uops_9_lrs3; // @[util.scala:505:22]
reg [1:0] uops_9_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_9_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_9_lrs2_rtype; // @[util.scala:505:22]
reg uops_9_frs3_en; // @[util.scala:505:22]
reg uops_9_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_9_fcn_op; // @[util.scala:505:22]
reg uops_9_fp_val; // @[util.scala:505:22]
reg [2:0] uops_9_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_9_fp_typ; // @[util.scala:505:22]
reg uops_9_xcpt_pf_if; // @[util.scala:505:22]
reg uops_9_xcpt_ae_if; // @[util.scala:505:22]
reg uops_9_xcpt_ma_if; // @[util.scala:505:22]
reg uops_9_bp_debug_if; // @[util.scala:505:22]
reg uops_9_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_9_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_9_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_10_inst; // @[util.scala:505:22]
reg [31:0] uops_10_debug_inst; // @[util.scala:505:22]
reg uops_10_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_10_debug_pc; // @[util.scala:505:22]
reg uops_10_iq_type_0; // @[util.scala:505:22]
reg uops_10_iq_type_1; // @[util.scala:505:22]
reg uops_10_iq_type_2; // @[util.scala:505:22]
reg uops_10_iq_type_3; // @[util.scala:505:22]
reg uops_10_fu_code_0; // @[util.scala:505:22]
reg uops_10_fu_code_1; // @[util.scala:505:22]
reg uops_10_fu_code_2; // @[util.scala:505:22]
reg uops_10_fu_code_3; // @[util.scala:505:22]
reg uops_10_fu_code_4; // @[util.scala:505:22]
reg uops_10_fu_code_5; // @[util.scala:505:22]
reg uops_10_fu_code_6; // @[util.scala:505:22]
reg uops_10_fu_code_7; // @[util.scala:505:22]
reg uops_10_fu_code_8; // @[util.scala:505:22]
reg uops_10_fu_code_9; // @[util.scala:505:22]
reg uops_10_iw_issued; // @[util.scala:505:22]
reg uops_10_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_10_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_10_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_10_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_10_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_10_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_10_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_10_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_10_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_10_br_mask_T_1 = uops_10_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_10_br_tag; // @[util.scala:505:22]
reg [3:0] uops_10_br_type; // @[util.scala:505:22]
reg uops_10_is_sfb; // @[util.scala:505:22]
reg uops_10_is_fence; // @[util.scala:505:22]
reg uops_10_is_fencei; // @[util.scala:505:22]
reg uops_10_is_sfence; // @[util.scala:505:22]
reg uops_10_is_amo; // @[util.scala:505:22]
reg uops_10_is_eret; // @[util.scala:505:22]
reg uops_10_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_10_is_rocc; // @[util.scala:505:22]
reg uops_10_is_mov; // @[util.scala:505:22]
reg [3:0] uops_10_ftq_idx; // @[util.scala:505:22]
reg uops_10_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_10_pc_lob; // @[util.scala:505:22]
reg uops_10_taken; // @[util.scala:505:22]
reg uops_10_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_10_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_10_pimm; // @[util.scala:505:22]
reg [19:0] uops_10_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_10_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_10_op2_sel; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_10_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_10_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_10_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_10_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_10_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_10_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_10_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_10_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_10_fp_ctrl_div; // @[util.scala:505:22]
reg uops_10_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_10_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_10_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_10_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_10_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_10_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_10_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_10_pdst; // @[util.scala:505:22]
reg [5:0] uops_10_prs1; // @[util.scala:505:22]
reg [5:0] uops_10_prs2; // @[util.scala:505:22]
reg [5:0] uops_10_prs3; // @[util.scala:505:22]
reg [3:0] uops_10_ppred; // @[util.scala:505:22]
reg uops_10_prs1_busy; // @[util.scala:505:22]
reg uops_10_prs2_busy; // @[util.scala:505:22]
reg uops_10_prs3_busy; // @[util.scala:505:22]
reg uops_10_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_10_stale_pdst; // @[util.scala:505:22]
reg uops_10_exception; // @[util.scala:505:22]
reg [63:0] uops_10_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_10_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_10_mem_size; // @[util.scala:505:22]
reg uops_10_mem_signed; // @[util.scala:505:22]
reg uops_10_uses_ldq; // @[util.scala:505:22]
reg uops_10_uses_stq; // @[util.scala:505:22]
reg uops_10_is_unique; // @[util.scala:505:22]
reg uops_10_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_10_csr_cmd; // @[util.scala:505:22]
reg uops_10_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_10_ldst; // @[util.scala:505:22]
reg [5:0] uops_10_lrs1; // @[util.scala:505:22]
reg [5:0] uops_10_lrs2; // @[util.scala:505:22]
reg [5:0] uops_10_lrs3; // @[util.scala:505:22]
reg [1:0] uops_10_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_10_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_10_lrs2_rtype; // @[util.scala:505:22]
reg uops_10_frs3_en; // @[util.scala:505:22]
reg uops_10_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_10_fcn_op; // @[util.scala:505:22]
reg uops_10_fp_val; // @[util.scala:505:22]
reg [2:0] uops_10_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_10_fp_typ; // @[util.scala:505:22]
reg uops_10_xcpt_pf_if; // @[util.scala:505:22]
reg uops_10_xcpt_ae_if; // @[util.scala:505:22]
reg uops_10_xcpt_ma_if; // @[util.scala:505:22]
reg uops_10_bp_debug_if; // @[util.scala:505:22]
reg uops_10_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_10_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_10_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_11_inst; // @[util.scala:505:22]
reg [31:0] uops_11_debug_inst; // @[util.scala:505:22]
reg uops_11_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_11_debug_pc; // @[util.scala:505:22]
reg uops_11_iq_type_0; // @[util.scala:505:22]
reg uops_11_iq_type_1; // @[util.scala:505:22]
reg uops_11_iq_type_2; // @[util.scala:505:22]
reg uops_11_iq_type_3; // @[util.scala:505:22]
reg uops_11_fu_code_0; // @[util.scala:505:22]
reg uops_11_fu_code_1; // @[util.scala:505:22]
reg uops_11_fu_code_2; // @[util.scala:505:22]
reg uops_11_fu_code_3; // @[util.scala:505:22]
reg uops_11_fu_code_4; // @[util.scala:505:22]
reg uops_11_fu_code_5; // @[util.scala:505:22]
reg uops_11_fu_code_6; // @[util.scala:505:22]
reg uops_11_fu_code_7; // @[util.scala:505:22]
reg uops_11_fu_code_8; // @[util.scala:505:22]
reg uops_11_fu_code_9; // @[util.scala:505:22]
reg uops_11_iw_issued; // @[util.scala:505:22]
reg uops_11_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_11_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_11_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_11_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_11_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_11_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_11_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_11_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_11_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_11_br_mask_T_1 = uops_11_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_11_br_tag; // @[util.scala:505:22]
reg [3:0] uops_11_br_type; // @[util.scala:505:22]
reg uops_11_is_sfb; // @[util.scala:505:22]
reg uops_11_is_fence; // @[util.scala:505:22]
reg uops_11_is_fencei; // @[util.scala:505:22]
reg uops_11_is_sfence; // @[util.scala:505:22]
reg uops_11_is_amo; // @[util.scala:505:22]
reg uops_11_is_eret; // @[util.scala:505:22]
reg uops_11_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_11_is_rocc; // @[util.scala:505:22]
reg uops_11_is_mov; // @[util.scala:505:22]
reg [3:0] uops_11_ftq_idx; // @[util.scala:505:22]
reg uops_11_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_11_pc_lob; // @[util.scala:505:22]
reg uops_11_taken; // @[util.scala:505:22]
reg uops_11_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_11_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_11_pimm; // @[util.scala:505:22]
reg [19:0] uops_11_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_11_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_11_op2_sel; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_11_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_11_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_11_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_11_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_11_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_11_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_11_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_11_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_11_fp_ctrl_div; // @[util.scala:505:22]
reg uops_11_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_11_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_11_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_11_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_11_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_11_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_11_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_11_pdst; // @[util.scala:505:22]
reg [5:0] uops_11_prs1; // @[util.scala:505:22]
reg [5:0] uops_11_prs2; // @[util.scala:505:22]
reg [5:0] uops_11_prs3; // @[util.scala:505:22]
reg [3:0] uops_11_ppred; // @[util.scala:505:22]
reg uops_11_prs1_busy; // @[util.scala:505:22]
reg uops_11_prs2_busy; // @[util.scala:505:22]
reg uops_11_prs3_busy; // @[util.scala:505:22]
reg uops_11_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_11_stale_pdst; // @[util.scala:505:22]
reg uops_11_exception; // @[util.scala:505:22]
reg [63:0] uops_11_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_11_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_11_mem_size; // @[util.scala:505:22]
reg uops_11_mem_signed; // @[util.scala:505:22]
reg uops_11_uses_ldq; // @[util.scala:505:22]
reg uops_11_uses_stq; // @[util.scala:505:22]
reg uops_11_is_unique; // @[util.scala:505:22]
reg uops_11_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_11_csr_cmd; // @[util.scala:505:22]
reg uops_11_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_11_ldst; // @[util.scala:505:22]
reg [5:0] uops_11_lrs1; // @[util.scala:505:22]
reg [5:0] uops_11_lrs2; // @[util.scala:505:22]
reg [5:0] uops_11_lrs3; // @[util.scala:505:22]
reg [1:0] uops_11_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_11_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_11_lrs2_rtype; // @[util.scala:505:22]
reg uops_11_frs3_en; // @[util.scala:505:22]
reg uops_11_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_11_fcn_op; // @[util.scala:505:22]
reg uops_11_fp_val; // @[util.scala:505:22]
reg [2:0] uops_11_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_11_fp_typ; // @[util.scala:505:22]
reg uops_11_xcpt_pf_if; // @[util.scala:505:22]
reg uops_11_xcpt_ae_if; // @[util.scala:505:22]
reg uops_11_xcpt_ma_if; // @[util.scala:505:22]
reg uops_11_bp_debug_if; // @[util.scala:505:22]
reg uops_11_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_11_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_11_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_12_inst; // @[util.scala:505:22]
reg [31:0] uops_12_debug_inst; // @[util.scala:505:22]
reg uops_12_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_12_debug_pc; // @[util.scala:505:22]
reg uops_12_iq_type_0; // @[util.scala:505:22]
reg uops_12_iq_type_1; // @[util.scala:505:22]
reg uops_12_iq_type_2; // @[util.scala:505:22]
reg uops_12_iq_type_3; // @[util.scala:505:22]
reg uops_12_fu_code_0; // @[util.scala:505:22]
reg uops_12_fu_code_1; // @[util.scala:505:22]
reg uops_12_fu_code_2; // @[util.scala:505:22]
reg uops_12_fu_code_3; // @[util.scala:505:22]
reg uops_12_fu_code_4; // @[util.scala:505:22]
reg uops_12_fu_code_5; // @[util.scala:505:22]
reg uops_12_fu_code_6; // @[util.scala:505:22]
reg uops_12_fu_code_7; // @[util.scala:505:22]
reg uops_12_fu_code_8; // @[util.scala:505:22]
reg uops_12_fu_code_9; // @[util.scala:505:22]
reg uops_12_iw_issued; // @[util.scala:505:22]
reg uops_12_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_12_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_12_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_12_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_12_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_12_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_12_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_12_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_12_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_12_br_mask_T_1 = uops_12_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_12_br_tag; // @[util.scala:505:22]
reg [3:0] uops_12_br_type; // @[util.scala:505:22]
reg uops_12_is_sfb; // @[util.scala:505:22]
reg uops_12_is_fence; // @[util.scala:505:22]
reg uops_12_is_fencei; // @[util.scala:505:22]
reg uops_12_is_sfence; // @[util.scala:505:22]
reg uops_12_is_amo; // @[util.scala:505:22]
reg uops_12_is_eret; // @[util.scala:505:22]
reg uops_12_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_12_is_rocc; // @[util.scala:505:22]
reg uops_12_is_mov; // @[util.scala:505:22]
reg [3:0] uops_12_ftq_idx; // @[util.scala:505:22]
reg uops_12_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_12_pc_lob; // @[util.scala:505:22]
reg uops_12_taken; // @[util.scala:505:22]
reg uops_12_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_12_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_12_pimm; // @[util.scala:505:22]
reg [19:0] uops_12_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_12_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_12_op2_sel; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_12_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_12_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_12_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_12_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_12_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_12_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_12_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_12_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_12_fp_ctrl_div; // @[util.scala:505:22]
reg uops_12_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_12_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_12_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_12_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_12_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_12_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_12_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_12_pdst; // @[util.scala:505:22]
reg [5:0] uops_12_prs1; // @[util.scala:505:22]
reg [5:0] uops_12_prs2; // @[util.scala:505:22]
reg [5:0] uops_12_prs3; // @[util.scala:505:22]
reg [3:0] uops_12_ppred; // @[util.scala:505:22]
reg uops_12_prs1_busy; // @[util.scala:505:22]
reg uops_12_prs2_busy; // @[util.scala:505:22]
reg uops_12_prs3_busy; // @[util.scala:505:22]
reg uops_12_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_12_stale_pdst; // @[util.scala:505:22]
reg uops_12_exception; // @[util.scala:505:22]
reg [63:0] uops_12_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_12_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_12_mem_size; // @[util.scala:505:22]
reg uops_12_mem_signed; // @[util.scala:505:22]
reg uops_12_uses_ldq; // @[util.scala:505:22]
reg uops_12_uses_stq; // @[util.scala:505:22]
reg uops_12_is_unique; // @[util.scala:505:22]
reg uops_12_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_12_csr_cmd; // @[util.scala:505:22]
reg uops_12_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_12_ldst; // @[util.scala:505:22]
reg [5:0] uops_12_lrs1; // @[util.scala:505:22]
reg [5:0] uops_12_lrs2; // @[util.scala:505:22]
reg [5:0] uops_12_lrs3; // @[util.scala:505:22]
reg [1:0] uops_12_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_12_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_12_lrs2_rtype; // @[util.scala:505:22]
reg uops_12_frs3_en; // @[util.scala:505:22]
reg uops_12_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_12_fcn_op; // @[util.scala:505:22]
reg uops_12_fp_val; // @[util.scala:505:22]
reg [2:0] uops_12_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_12_fp_typ; // @[util.scala:505:22]
reg uops_12_xcpt_pf_if; // @[util.scala:505:22]
reg uops_12_xcpt_ae_if; // @[util.scala:505:22]
reg uops_12_xcpt_ma_if; // @[util.scala:505:22]
reg uops_12_bp_debug_if; // @[util.scala:505:22]
reg uops_12_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_12_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_12_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_13_inst; // @[util.scala:505:22]
reg [31:0] uops_13_debug_inst; // @[util.scala:505:22]
reg uops_13_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_13_debug_pc; // @[util.scala:505:22]
reg uops_13_iq_type_0; // @[util.scala:505:22]
reg uops_13_iq_type_1; // @[util.scala:505:22]
reg uops_13_iq_type_2; // @[util.scala:505:22]
reg uops_13_iq_type_3; // @[util.scala:505:22]
reg uops_13_fu_code_0; // @[util.scala:505:22]
reg uops_13_fu_code_1; // @[util.scala:505:22]
reg uops_13_fu_code_2; // @[util.scala:505:22]
reg uops_13_fu_code_3; // @[util.scala:505:22]
reg uops_13_fu_code_4; // @[util.scala:505:22]
reg uops_13_fu_code_5; // @[util.scala:505:22]
reg uops_13_fu_code_6; // @[util.scala:505:22]
reg uops_13_fu_code_7; // @[util.scala:505:22]
reg uops_13_fu_code_8; // @[util.scala:505:22]
reg uops_13_fu_code_9; // @[util.scala:505:22]
reg uops_13_iw_issued; // @[util.scala:505:22]
reg uops_13_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_13_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_13_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_13_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_13_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_13_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_13_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_13_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_13_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_13_br_mask_T_1 = uops_13_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_13_br_tag; // @[util.scala:505:22]
reg [3:0] uops_13_br_type; // @[util.scala:505:22]
reg uops_13_is_sfb; // @[util.scala:505:22]
reg uops_13_is_fence; // @[util.scala:505:22]
reg uops_13_is_fencei; // @[util.scala:505:22]
reg uops_13_is_sfence; // @[util.scala:505:22]
reg uops_13_is_amo; // @[util.scala:505:22]
reg uops_13_is_eret; // @[util.scala:505:22]
reg uops_13_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_13_is_rocc; // @[util.scala:505:22]
reg uops_13_is_mov; // @[util.scala:505:22]
reg [3:0] uops_13_ftq_idx; // @[util.scala:505:22]
reg uops_13_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_13_pc_lob; // @[util.scala:505:22]
reg uops_13_taken; // @[util.scala:505:22]
reg uops_13_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_13_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_13_pimm; // @[util.scala:505:22]
reg [19:0] uops_13_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_13_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_13_op2_sel; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_13_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_13_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_13_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_13_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_13_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_13_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_13_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_13_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_13_fp_ctrl_div; // @[util.scala:505:22]
reg uops_13_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_13_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_13_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_13_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_13_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_13_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_13_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_13_pdst; // @[util.scala:505:22]
reg [5:0] uops_13_prs1; // @[util.scala:505:22]
reg [5:0] uops_13_prs2; // @[util.scala:505:22]
reg [5:0] uops_13_prs3; // @[util.scala:505:22]
reg [3:0] uops_13_ppred; // @[util.scala:505:22]
reg uops_13_prs1_busy; // @[util.scala:505:22]
reg uops_13_prs2_busy; // @[util.scala:505:22]
reg uops_13_prs3_busy; // @[util.scala:505:22]
reg uops_13_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_13_stale_pdst; // @[util.scala:505:22]
reg uops_13_exception; // @[util.scala:505:22]
reg [63:0] uops_13_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_13_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_13_mem_size; // @[util.scala:505:22]
reg uops_13_mem_signed; // @[util.scala:505:22]
reg uops_13_uses_ldq; // @[util.scala:505:22]
reg uops_13_uses_stq; // @[util.scala:505:22]
reg uops_13_is_unique; // @[util.scala:505:22]
reg uops_13_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_13_csr_cmd; // @[util.scala:505:22]
reg uops_13_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_13_ldst; // @[util.scala:505:22]
reg [5:0] uops_13_lrs1; // @[util.scala:505:22]
reg [5:0] uops_13_lrs2; // @[util.scala:505:22]
reg [5:0] uops_13_lrs3; // @[util.scala:505:22]
reg [1:0] uops_13_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_13_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_13_lrs2_rtype; // @[util.scala:505:22]
reg uops_13_frs3_en; // @[util.scala:505:22]
reg uops_13_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_13_fcn_op; // @[util.scala:505:22]
reg uops_13_fp_val; // @[util.scala:505:22]
reg [2:0] uops_13_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_13_fp_typ; // @[util.scala:505:22]
reg uops_13_xcpt_pf_if; // @[util.scala:505:22]
reg uops_13_xcpt_ae_if; // @[util.scala:505:22]
reg uops_13_xcpt_ma_if; // @[util.scala:505:22]
reg uops_13_bp_debug_if; // @[util.scala:505:22]
reg uops_13_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_13_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_13_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_14_inst; // @[util.scala:505:22]
reg [31:0] uops_14_debug_inst; // @[util.scala:505:22]
reg uops_14_is_rvc; // @[util.scala:505:22]
reg [33:0] uops_14_debug_pc; // @[util.scala:505:22]
reg uops_14_iq_type_0; // @[util.scala:505:22]
reg uops_14_iq_type_1; // @[util.scala:505:22]
reg uops_14_iq_type_2; // @[util.scala:505:22]
reg uops_14_iq_type_3; // @[util.scala:505:22]
reg uops_14_fu_code_0; // @[util.scala:505:22]
reg uops_14_fu_code_1; // @[util.scala:505:22]
reg uops_14_fu_code_2; // @[util.scala:505:22]
reg uops_14_fu_code_3; // @[util.scala:505:22]
reg uops_14_fu_code_4; // @[util.scala:505:22]
reg uops_14_fu_code_5; // @[util.scala:505:22]
reg uops_14_fu_code_6; // @[util.scala:505:22]
reg uops_14_fu_code_7; // @[util.scala:505:22]
reg uops_14_fu_code_8; // @[util.scala:505:22]
reg uops_14_fu_code_9; // @[util.scala:505:22]
reg uops_14_iw_issued; // @[util.scala:505:22]
reg uops_14_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_14_iw_issued_partial_dgen; // @[util.scala:505:22]
reg uops_14_iw_p1_speculative_child; // @[util.scala:505:22]
reg uops_14_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_14_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_14_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_14_iw_p3_bypass_hint; // @[util.scala:505:22]
reg uops_14_dis_col_sel; // @[util.scala:505:22]
reg [3:0] uops_14_br_mask; // @[util.scala:505:22]
wire [3:0] _uops_14_br_mask_T_1 = uops_14_br_mask; // @[util.scala:97:21, :505:22]
reg [1:0] uops_14_br_tag; // @[util.scala:505:22]
reg [3:0] uops_14_br_type; // @[util.scala:505:22]
reg uops_14_is_sfb; // @[util.scala:505:22]
reg uops_14_is_fence; // @[util.scala:505:22]
reg uops_14_is_fencei; // @[util.scala:505:22]
reg uops_14_is_sfence; // @[util.scala:505:22]
reg uops_14_is_amo; // @[util.scala:505:22]
reg uops_14_is_eret; // @[util.scala:505:22]
reg uops_14_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_14_is_rocc; // @[util.scala:505:22]
reg uops_14_is_mov; // @[util.scala:505:22]
reg [3:0] uops_14_ftq_idx; // @[util.scala:505:22]
reg uops_14_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_14_pc_lob; // @[util.scala:505:22]
reg uops_14_taken; // @[util.scala:505:22]
reg uops_14_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_14_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_14_pimm; // @[util.scala:505:22]
reg [19:0] uops_14_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_14_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_14_op2_sel; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_14_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_14_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_14_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_14_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_14_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_14_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_14_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_14_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_14_fp_ctrl_div; // @[util.scala:505:22]
reg uops_14_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_14_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_14_fp_ctrl_vec; // @[util.scala:505:22]
reg [4:0] uops_14_rob_idx; // @[util.scala:505:22]
reg [3:0] uops_14_ldq_idx; // @[util.scala:505:22]
reg [3:0] uops_14_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_14_rxq_idx; // @[util.scala:505:22]
reg [5:0] uops_14_pdst; // @[util.scala:505:22]
reg [5:0] uops_14_prs1; // @[util.scala:505:22]
reg [5:0] uops_14_prs2; // @[util.scala:505:22]
reg [5:0] uops_14_prs3; // @[util.scala:505:22]
reg [3:0] uops_14_ppred; // @[util.scala:505:22]
reg uops_14_prs1_busy; // @[util.scala:505:22]
reg uops_14_prs2_busy; // @[util.scala:505:22]
reg uops_14_prs3_busy; // @[util.scala:505:22]
reg uops_14_ppred_busy; // @[util.scala:505:22]
reg [5:0] uops_14_stale_pdst; // @[util.scala:505:22]
reg uops_14_exception; // @[util.scala:505:22]
reg [63:0] uops_14_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_14_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_14_mem_size; // @[util.scala:505:22]
reg uops_14_mem_signed; // @[util.scala:505:22]
reg uops_14_uses_ldq; // @[util.scala:505:22]
reg uops_14_uses_stq; // @[util.scala:505:22]
reg uops_14_is_unique; // @[util.scala:505:22]
reg uops_14_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_14_csr_cmd; // @[util.scala:505:22]
reg uops_14_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_14_ldst; // @[util.scala:505:22]
reg [5:0] uops_14_lrs1; // @[util.scala:505:22]
reg [5:0] uops_14_lrs2; // @[util.scala:505:22]
reg [5:0] uops_14_lrs3; // @[util.scala:505:22]
reg [1:0] uops_14_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_14_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_14_lrs2_rtype; // @[util.scala:505:22]
reg uops_14_frs3_en; // @[util.scala:505:22]
reg uops_14_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_14_fcn_op; // @[util.scala:505:22]
reg uops_14_fp_val; // @[util.scala:505:22]
reg [2:0] uops_14_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_14_fp_typ; // @[util.scala:505:22]
reg uops_14_xcpt_pf_if; // @[util.scala:505:22]
reg uops_14_xcpt_ae_if; // @[util.scala:505:22]
reg uops_14_xcpt_ma_if; // @[util.scala:505:22]
reg uops_14_bp_debug_if; // @[util.scala:505:22]
reg uops_14_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_14_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_14_debug_tsrc; // @[util.scala:505:22]
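  // FIFO bookkeeping: enqueue/dequeue pointers (Chisel Counter state) plus a maybe_full flag
  // that distinguishes the full and empty cases when the two pointers are equal.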
reg [3:0] enq_ptr_value; // @[Counter.scala:61:40]
reg [3:0] deq_ptr_value; // @[Counter.scala:61:40]
reg maybe_full; // @[util.scala:509:29]
wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40]
wire _io_empty_T = ~maybe_full; // @[util.scala:509:29, :512:30]
assign _io_empty_T_1 = ptr_match & _io_empty_T; // @[util.scala:511:35, :512:{27,30}]
assign io_empty_0 = _io_empty_T_1; // @[util.scala:458:7, :512:27]
wire full = ptr_match & maybe_full; // @[util.scala:509:29, :511:35, :513:26]
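  // An enqueue fires when io_enq_ready and io_enq_valid are both asserted. A dequeue fires when the
  // consumer is ready, or when the head entry's valid bit has already been cleared (e.g. a
  // branch-killed micro-op), provided the queue is not empty.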
wire _do_enq_T = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire _do_enq_T_5 = _do_enq_T; // @[Decoupled.scala:51:35]
wire _do_enq_T_8 = _do_enq_T_5; // @[util.scala:514:{39,99}]
wire do_enq = _do_enq_T_8; // @[util.scala:514:{26,99}]
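  // Pack the per-entry valid bits so they can be indexed dynamically by deq_ptr_value; the
  // index-15 slot is a don't-care placeholder (a duplicate of valids_0) since only 15 entries exist.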
wire [15:0] _GEN = {{valids_0}, {valids_14}, {valids_13}, {valids_12}, {valids_11}, {valids_10}, {valids_9}, {valids_8}, {valids_7}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}}; // @[util.scala:504:26, :515:44]
wire _GEN_0 = _GEN[deq_ptr_value]; // @[Counter.scala:61:40]
wire _do_deq_T = ~_GEN_0; // @[util.scala:515:44]
wire _do_deq_T_1 = io_deq_ready_0 | _do_deq_T; // @[util.scala:458:7, :515:{41,44}]
wire _do_deq_T_2 = ~io_empty_0; // @[util.scala:458:7, :515:71]
wire _do_deq_T_3 = _do_deq_T_1 & _do_deq_T_2; // @[util.scala:515:{41,68,71}]
wire do_deq = _do_deq_T_3; // @[util.scala:515:{26,68}]
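  // Per-entry valid-bit update terms; the _valids_N_T_4 inputs are computed earlier in the module.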
wire _valids_0_T_7 = _valids_0_T_4; // @[util.scala:520:{31,80}]
wire _valids_1_T_7 = _valids_1_T_4; // @[util.scala:520:{31,80}]
wire _valids_2_T_7 = _valids_2_T_4; // @[util.scala:520:{31,80}]
wire _valids_3_T_7 = _valids_3_T_4; // @[util.scala:520:{31,80}]
wire _valids_4_T_7 = _valids_4_T_4; // @[util.scala:520:{31,80}]
wire _valids_5_T_7 = _valids_5_T_4; // @[util.scala:520:{31,80}]
wire _valids_6_T_7 = _valids_6_T_4; // @[util.scala:520:{31,80}]
wire _valids_7_T_7 = _valids_7_T_4; // @[util.scala:520:{31,80}]
wire _valids_8_T_7 = _valids_8_T_4; // @[util.scala:520:{31,80}]
wire _valids_9_T_7 = _valids_9_T_4; // @[util.scala:520:{31,80}]
wire _valids_10_T_7 = _valids_10_T_4; // @[util.scala:520:{31,80}]
wire _valids_11_T_7 = _valids_11_T_4; // @[util.scala:520:{31,80}]
wire _valids_12_T_7 = _valids_12_T_4; // @[util.scala:520:{31,80}]
wire _valids_13_T_7 = _valids_13_T_4; // @[util.scala:520:{31,80}]
wire _valids_14_T_7 = _valids_14_T_4; // @[util.scala:520:{31,80}]
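  // Pointer increment with wrap: each counter wraps after reaching 4'hE (entry 14), so the queue depth is 15.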
wire wrap = enq_ptr_value == 4'hE; // @[Counter.scala:61:40, :73:24]
wire [4:0] _GEN_1 = {1'h0, enq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [4:0] _value_T = _GEN_1 + 5'h1; // @[Counter.scala:77:24]
wire [3:0] _value_T_1 = _value_T[3:0]; // @[Counter.scala:77:24]
wire wrap_1 = deq_ptr_value == 4'hE; // @[Counter.scala:61:40, :73:24]
wire [4:0] _GEN_2 = {1'h0, deq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [4:0] _value_T_2 = _GEN_2 + 5'h1; // @[Counter.scala:77:24]
wire [3:0] _value_T_3 = _value_T_2[3:0]; // @[Counter.scala:77:24]
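  // Output wiring: io_enq_ready is simply the inverse of full, and the io_deq_bits_uop_* fields are
  // forwarded from the out_uop_* signals selected earlier in the module.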
assign _io_enq_ready_T = ~full; // @[util.scala:513:26, :543:21]
assign io_enq_ready_0 = _io_enq_ready_T; // @[util.scala:458:7, :543:21]
assign io_deq_bits_uop_inst_0 = out_uop_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_inst_0 = out_uop_debug_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rvc_0 = out_uop_is_rvc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_pc_0 = out_uop_debug_pc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_0_0 = out_uop_iq_type_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_1_0 = out_uop_iq_type_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_2_0 = out_uop_iq_type_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_3_0 = out_uop_iq_type_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_0_0 = out_uop_fu_code_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_1_0 = out_uop_fu_code_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_2_0 = out_uop_fu_code_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_3_0 = out_uop_fu_code_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_4_0 = out_uop_fu_code_4; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_5_0 = out_uop_fu_code_5; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_6_0 = out_uop_fu_code_6; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_7_0 = out_uop_fu_code_7; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_8_0 = out_uop_fu_code_8; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_9_0 = out_uop_fu_code_9; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_0 = out_uop_iw_issued; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_agen_0 = out_uop_iw_issued_partial_agen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_dgen_0 = out_uop_iw_issued_partial_dgen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_speculative_child_0 = out_uop_iw_p1_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_speculative_child_0 = out_uop_iw_p2_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_bypass_hint_0 = out_uop_iw_p1_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_bypass_hint_0 = out_uop_iw_p2_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p3_bypass_hint_0 = out_uop_iw_p3_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dis_col_sel_0 = out_uop_dis_col_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_mask_0 = out_uop_br_mask; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_tag_0 = out_uop_br_tag; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_type_0 = out_uop_br_type; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfb_0 = out_uop_is_sfb; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fence_0 = out_uop_is_fence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fencei_0 = out_uop_is_fencei; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfence_0 = out_uop_is_sfence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_amo_0 = out_uop_is_amo; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_eret_0 = out_uop_is_eret; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sys_pc2epc_0 = out_uop_is_sys_pc2epc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rocc_0 = out_uop_is_rocc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_mov_0 = out_uop_is_mov; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ftq_idx_0 = out_uop_ftq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_edge_inst_0 = out_uop_edge_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pc_lob_0 = out_uop_pc_lob; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_taken_0 = out_uop_taken; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_rename_0 = out_uop_imm_rename; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_sel_0 = out_uop_imm_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pimm_0 = out_uop_pimm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_packed_0 = out_uop_imm_packed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op1_sel_0 = out_uop_op1_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op2_sel_0 = out_uop_op2_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ldst_0 = out_uop_fp_ctrl_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wen_0 = out_uop_fp_ctrl_wen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren1_0 = out_uop_fp_ctrl_ren1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren2_0 = out_uop_fp_ctrl_ren2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren3_0 = out_uop_fp_ctrl_ren3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap12_0 = out_uop_fp_ctrl_swap12; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap23_0 = out_uop_fp_ctrl_swap23; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagIn_0 = out_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagOut_0 = out_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fromint_0 = out_uop_fp_ctrl_fromint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_toint_0 = out_uop_fp_ctrl_toint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fastpipe_0 = out_uop_fp_ctrl_fastpipe; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fma_0 = out_uop_fp_ctrl_fma; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_div_0 = out_uop_fp_ctrl_div; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_sqrt_0 = out_uop_fp_ctrl_sqrt; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wflags_0 = out_uop_fp_ctrl_wflags; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_vec_0 = out_uop_fp_ctrl_vec; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rob_idx_0 = out_uop_rob_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldq_idx_0 = out_uop_ldq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stq_idx_0 = out_uop_stq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rxq_idx_0 = out_uop_rxq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pdst_0 = out_uop_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_0 = out_uop_prs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_0 = out_uop_prs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_0 = out_uop_prs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_0 = out_uop_ppred; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_busy_0 = out_uop_prs1_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_busy_0 = out_uop_prs2_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_busy_0 = out_uop_prs3_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_busy_0 = out_uop_ppred_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stale_pdst_0 = out_uop_stale_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exception_0 = out_uop_exception; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exc_cause_0 = out_uop_exc_cause; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_cmd_0 = out_uop_mem_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_size_0 = out_uop_mem_size; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_signed_0 = out_uop_mem_signed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_ldq_0 = out_uop_uses_ldq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_stq_0 = out_uop_uses_stq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_unique_0 = out_uop_is_unique; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_flush_on_commit_0 = out_uop_flush_on_commit; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_csr_cmd_0 = out_uop_csr_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_is_rs1_0 = out_uop_ldst_is_rs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_0 = out_uop_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_0 = out_uop_lrs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_0 = out_uop_lrs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs3_0 = out_uop_lrs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dst_rtype_0 = out_uop_dst_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_rtype_0 = out_uop_lrs1_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_rtype_0 = out_uop_lrs2_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_frs3_en_0 = out_uop_frs3_en; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_dw_0 = out_uop_fcn_dw; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_op_0 = out_uop_fcn_op; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_val_0 = out_uop_fp_val; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_rm_0 = out_uop_fp_rm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_typ_0 = out_uop_fp_typ; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_pf_if_0 = out_uop_xcpt_pf_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ae_if_0 = out_uop_xcpt_ae_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ma_if_0 = out_uop_xcpt_ma_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_debug_if_0 = out_uop_bp_debug_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_xcpt_if_0 = out_uop_bp_xcpt_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_fsrc_0 = out_uop_debug_fsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_tsrc_0 = out_uop_debug_tsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_addr_0 = out_addr; // @[util.scala:458:7, :545:19]
assign io_deq_bits_data_0 = out_data; // @[util.scala:458:7, :545:19]
assign io_deq_bits_is_hella_0 = out_is_hella; // @[util.scala:458:7, :545:19]
assign io_deq_bits_tag_match_0 = out_tag_match; // @[util.scala:458:7, :545:19]
assign io_deq_bits_old_meta_coh_state_0 = out_old_meta_coh_state; // @[util.scala:458:7, :545:19]
assign io_deq_bits_old_meta_tag_0 = out_old_meta_tag; // @[util.scala:458:7, :545:19]
assign io_deq_bits_way_en_0 = out_way_en; // @[util.scala:458:7, :545:19]
assign io_deq_bits_sdq_id_0 = out_sdq_id; // @[util.scala:458:7, :545:19]
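// Dequeue read-port muxes: each 16-way concatenation packs the 15 uops_* registers and is
// indexed by the 4-bit deq_ptr_value; the otherwise-unused index 15 (leftmost element)
// aliases entry 0, mirroring the padding used for the valids vector in _GEN above.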
wire [15:0][31:0] _GEN_3 = {{uops_0_inst}, {uops_14_inst}, {uops_13_inst}, {uops_12_inst}, {uops_11_inst}, {uops_10_inst}, {uops_9_inst}, {uops_8_inst}, {uops_7_inst}, {uops_6_inst}, {uops_5_inst}, {uops_4_inst}, {uops_3_inst}, {uops_2_inst}, {uops_1_inst}, {uops_0_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_inst = _GEN_3[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][31:0] _GEN_4 = {{uops_0_debug_inst}, {uops_14_debug_inst}, {uops_13_debug_inst}, {uops_12_debug_inst}, {uops_11_debug_inst}, {uops_10_debug_inst}, {uops_9_debug_inst}, {uops_8_debug_inst}, {uops_7_debug_inst}, {uops_6_debug_inst}, {uops_5_debug_inst}, {uops_4_debug_inst}, {uops_3_debug_inst}, {uops_2_debug_inst}, {uops_1_debug_inst}, {uops_0_debug_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_inst = _GEN_4[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_5 = {{uops_0_is_rvc}, {uops_14_is_rvc}, {uops_13_is_rvc}, {uops_12_is_rvc}, {uops_11_is_rvc}, {uops_10_is_rvc}, {uops_9_is_rvc}, {uops_8_is_rvc}, {uops_7_is_rvc}, {uops_6_is_rvc}, {uops_5_is_rvc}, {uops_4_is_rvc}, {uops_3_is_rvc}, {uops_2_is_rvc}, {uops_1_is_rvc}, {uops_0_is_rvc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rvc = _GEN_5[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][33:0] _GEN_6 = {{uops_0_debug_pc}, {uops_14_debug_pc}, {uops_13_debug_pc}, {uops_12_debug_pc}, {uops_11_debug_pc}, {uops_10_debug_pc}, {uops_9_debug_pc}, {uops_8_debug_pc}, {uops_7_debug_pc}, {uops_6_debug_pc}, {uops_5_debug_pc}, {uops_4_debug_pc}, {uops_3_debug_pc}, {uops_2_debug_pc}, {uops_1_debug_pc}, {uops_0_debug_pc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_pc = _GEN_6[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_7 = {{uops_0_iq_type_0}, {uops_14_iq_type_0}, {uops_13_iq_type_0}, {uops_12_iq_type_0}, {uops_11_iq_type_0}, {uops_10_iq_type_0}, {uops_9_iq_type_0}, {uops_8_iq_type_0}, {uops_7_iq_type_0}, {uops_6_iq_type_0}, {uops_5_iq_type_0}, {uops_4_iq_type_0}, {uops_3_iq_type_0}, {uops_2_iq_type_0}, {uops_1_iq_type_0}, {uops_0_iq_type_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_0 = _GEN_7[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_8 = {{uops_0_iq_type_1}, {uops_14_iq_type_1}, {uops_13_iq_type_1}, {uops_12_iq_type_1}, {uops_11_iq_type_1}, {uops_10_iq_type_1}, {uops_9_iq_type_1}, {uops_8_iq_type_1}, {uops_7_iq_type_1}, {uops_6_iq_type_1}, {uops_5_iq_type_1}, {uops_4_iq_type_1}, {uops_3_iq_type_1}, {uops_2_iq_type_1}, {uops_1_iq_type_1}, {uops_0_iq_type_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_1 = _GEN_8[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_9 = {{uops_0_iq_type_2}, {uops_14_iq_type_2}, {uops_13_iq_type_2}, {uops_12_iq_type_2}, {uops_11_iq_type_2}, {uops_10_iq_type_2}, {uops_9_iq_type_2}, {uops_8_iq_type_2}, {uops_7_iq_type_2}, {uops_6_iq_type_2}, {uops_5_iq_type_2}, {uops_4_iq_type_2}, {uops_3_iq_type_2}, {uops_2_iq_type_2}, {uops_1_iq_type_2}, {uops_0_iq_type_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_2 = _GEN_9[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_10 = {{uops_0_iq_type_3}, {uops_14_iq_type_3}, {uops_13_iq_type_3}, {uops_12_iq_type_3}, {uops_11_iq_type_3}, {uops_10_iq_type_3}, {uops_9_iq_type_3}, {uops_8_iq_type_3}, {uops_7_iq_type_3}, {uops_6_iq_type_3}, {uops_5_iq_type_3}, {uops_4_iq_type_3}, {uops_3_iq_type_3}, {uops_2_iq_type_3}, {uops_1_iq_type_3}, {uops_0_iq_type_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_3 = _GEN_10[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_11 = {{uops_0_fu_code_0}, {uops_14_fu_code_0}, {uops_13_fu_code_0}, {uops_12_fu_code_0}, {uops_11_fu_code_0}, {uops_10_fu_code_0}, {uops_9_fu_code_0}, {uops_8_fu_code_0}, {uops_7_fu_code_0}, {uops_6_fu_code_0}, {uops_5_fu_code_0}, {uops_4_fu_code_0}, {uops_3_fu_code_0}, {uops_2_fu_code_0}, {uops_1_fu_code_0}, {uops_0_fu_code_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_0 = _GEN_11[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_12 = {{uops_0_fu_code_1}, {uops_14_fu_code_1}, {uops_13_fu_code_1}, {uops_12_fu_code_1}, {uops_11_fu_code_1}, {uops_10_fu_code_1}, {uops_9_fu_code_1}, {uops_8_fu_code_1}, {uops_7_fu_code_1}, {uops_6_fu_code_1}, {uops_5_fu_code_1}, {uops_4_fu_code_1}, {uops_3_fu_code_1}, {uops_2_fu_code_1}, {uops_1_fu_code_1}, {uops_0_fu_code_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_1 = _GEN_12[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_13 = {{uops_0_fu_code_2}, {uops_14_fu_code_2}, {uops_13_fu_code_2}, {uops_12_fu_code_2}, {uops_11_fu_code_2}, {uops_10_fu_code_2}, {uops_9_fu_code_2}, {uops_8_fu_code_2}, {uops_7_fu_code_2}, {uops_6_fu_code_2}, {uops_5_fu_code_2}, {uops_4_fu_code_2}, {uops_3_fu_code_2}, {uops_2_fu_code_2}, {uops_1_fu_code_2}, {uops_0_fu_code_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_2 = _GEN_13[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_14 = {{uops_0_fu_code_3}, {uops_14_fu_code_3}, {uops_13_fu_code_3}, {uops_12_fu_code_3}, {uops_11_fu_code_3}, {uops_10_fu_code_3}, {uops_9_fu_code_3}, {uops_8_fu_code_3}, {uops_7_fu_code_3}, {uops_6_fu_code_3}, {uops_5_fu_code_3}, {uops_4_fu_code_3}, {uops_3_fu_code_3}, {uops_2_fu_code_3}, {uops_1_fu_code_3}, {uops_0_fu_code_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_3 = _GEN_14[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_15 = {{uops_0_fu_code_4}, {uops_14_fu_code_4}, {uops_13_fu_code_4}, {uops_12_fu_code_4}, {uops_11_fu_code_4}, {uops_10_fu_code_4}, {uops_9_fu_code_4}, {uops_8_fu_code_4}, {uops_7_fu_code_4}, {uops_6_fu_code_4}, {uops_5_fu_code_4}, {uops_4_fu_code_4}, {uops_3_fu_code_4}, {uops_2_fu_code_4}, {uops_1_fu_code_4}, {uops_0_fu_code_4}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_4 = _GEN_15[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_16 = {{uops_0_fu_code_5}, {uops_14_fu_code_5}, {uops_13_fu_code_5}, {uops_12_fu_code_5}, {uops_11_fu_code_5}, {uops_10_fu_code_5}, {uops_9_fu_code_5}, {uops_8_fu_code_5}, {uops_7_fu_code_5}, {uops_6_fu_code_5}, {uops_5_fu_code_5}, {uops_4_fu_code_5}, {uops_3_fu_code_5}, {uops_2_fu_code_5}, {uops_1_fu_code_5}, {uops_0_fu_code_5}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_5 = _GEN_16[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_17 = {{uops_0_fu_code_6}, {uops_14_fu_code_6}, {uops_13_fu_code_6}, {uops_12_fu_code_6}, {uops_11_fu_code_6}, {uops_10_fu_code_6}, {uops_9_fu_code_6}, {uops_8_fu_code_6}, {uops_7_fu_code_6}, {uops_6_fu_code_6}, {uops_5_fu_code_6}, {uops_4_fu_code_6}, {uops_3_fu_code_6}, {uops_2_fu_code_6}, {uops_1_fu_code_6}, {uops_0_fu_code_6}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_6 = _GEN_17[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_18 = {{uops_0_fu_code_7}, {uops_14_fu_code_7}, {uops_13_fu_code_7}, {uops_12_fu_code_7}, {uops_11_fu_code_7}, {uops_10_fu_code_7}, {uops_9_fu_code_7}, {uops_8_fu_code_7}, {uops_7_fu_code_7}, {uops_6_fu_code_7}, {uops_5_fu_code_7}, {uops_4_fu_code_7}, {uops_3_fu_code_7}, {uops_2_fu_code_7}, {uops_1_fu_code_7}, {uops_0_fu_code_7}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_7 = _GEN_18[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_19 = {{uops_0_fu_code_8}, {uops_14_fu_code_8}, {uops_13_fu_code_8}, {uops_12_fu_code_8}, {uops_11_fu_code_8}, {uops_10_fu_code_8}, {uops_9_fu_code_8}, {uops_8_fu_code_8}, {uops_7_fu_code_8}, {uops_6_fu_code_8}, {uops_5_fu_code_8}, {uops_4_fu_code_8}, {uops_3_fu_code_8}, {uops_2_fu_code_8}, {uops_1_fu_code_8}, {uops_0_fu_code_8}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_8 = _GEN_19[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_20 = {{uops_0_fu_code_9}, {uops_14_fu_code_9}, {uops_13_fu_code_9}, {uops_12_fu_code_9}, {uops_11_fu_code_9}, {uops_10_fu_code_9}, {uops_9_fu_code_9}, {uops_8_fu_code_9}, {uops_7_fu_code_9}, {uops_6_fu_code_9}, {uops_5_fu_code_9}, {uops_4_fu_code_9}, {uops_3_fu_code_9}, {uops_2_fu_code_9}, {uops_1_fu_code_9}, {uops_0_fu_code_9}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_9 = _GEN_20[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_21 = {{uops_0_iw_issued}, {uops_14_iw_issued}, {uops_13_iw_issued}, {uops_12_iw_issued}, {uops_11_iw_issued}, {uops_10_iw_issued}, {uops_9_iw_issued}, {uops_8_iw_issued}, {uops_7_iw_issued}, {uops_6_iw_issued}, {uops_5_iw_issued}, {uops_4_iw_issued}, {uops_3_iw_issued}, {uops_2_iw_issued}, {uops_1_iw_issued}, {uops_0_iw_issued}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued = _GEN_21[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_22 = {{uops_0_iw_issued_partial_agen}, {uops_14_iw_issued_partial_agen}, {uops_13_iw_issued_partial_agen}, {uops_12_iw_issued_partial_agen}, {uops_11_iw_issued_partial_agen}, {uops_10_iw_issued_partial_agen}, {uops_9_iw_issued_partial_agen}, {uops_8_iw_issued_partial_agen}, {uops_7_iw_issued_partial_agen}, {uops_6_iw_issued_partial_agen}, {uops_5_iw_issued_partial_agen}, {uops_4_iw_issued_partial_agen}, {uops_3_iw_issued_partial_agen}, {uops_2_iw_issued_partial_agen}, {uops_1_iw_issued_partial_agen}, {uops_0_iw_issued_partial_agen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_agen = _GEN_22[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_23 = {{uops_0_iw_issued_partial_dgen}, {uops_14_iw_issued_partial_dgen}, {uops_13_iw_issued_partial_dgen}, {uops_12_iw_issued_partial_dgen}, {uops_11_iw_issued_partial_dgen}, {uops_10_iw_issued_partial_dgen}, {uops_9_iw_issued_partial_dgen}, {uops_8_iw_issued_partial_dgen}, {uops_7_iw_issued_partial_dgen}, {uops_6_iw_issued_partial_dgen}, {uops_5_iw_issued_partial_dgen}, {uops_4_iw_issued_partial_dgen}, {uops_3_iw_issued_partial_dgen}, {uops_2_iw_issued_partial_dgen}, {uops_1_iw_issued_partial_dgen}, {uops_0_iw_issued_partial_dgen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_dgen = _GEN_23[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_24 = {{uops_0_iw_p1_speculative_child}, {uops_14_iw_p1_speculative_child}, {uops_13_iw_p1_speculative_child}, {uops_12_iw_p1_speculative_child}, {uops_11_iw_p1_speculative_child}, {uops_10_iw_p1_speculative_child}, {uops_9_iw_p1_speculative_child}, {uops_8_iw_p1_speculative_child}, {uops_7_iw_p1_speculative_child}, {uops_6_iw_p1_speculative_child}, {uops_5_iw_p1_speculative_child}, {uops_4_iw_p1_speculative_child}, {uops_3_iw_p1_speculative_child}, {uops_2_iw_p1_speculative_child}, {uops_1_iw_p1_speculative_child}, {uops_0_iw_p1_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_speculative_child = _GEN_24[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_25 = {{uops_0_iw_p2_speculative_child}, {uops_14_iw_p2_speculative_child}, {uops_13_iw_p2_speculative_child}, {uops_12_iw_p2_speculative_child}, {uops_11_iw_p2_speculative_child}, {uops_10_iw_p2_speculative_child}, {uops_9_iw_p2_speculative_child}, {uops_8_iw_p2_speculative_child}, {uops_7_iw_p2_speculative_child}, {uops_6_iw_p2_speculative_child}, {uops_5_iw_p2_speculative_child}, {uops_4_iw_p2_speculative_child}, {uops_3_iw_p2_speculative_child}, {uops_2_iw_p2_speculative_child}, {uops_1_iw_p2_speculative_child}, {uops_0_iw_p2_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_speculative_child = _GEN_25[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_26 = {{uops_0_iw_p1_bypass_hint}, {uops_14_iw_p1_bypass_hint}, {uops_13_iw_p1_bypass_hint}, {uops_12_iw_p1_bypass_hint}, {uops_11_iw_p1_bypass_hint}, {uops_10_iw_p1_bypass_hint}, {uops_9_iw_p1_bypass_hint}, {uops_8_iw_p1_bypass_hint}, {uops_7_iw_p1_bypass_hint}, {uops_6_iw_p1_bypass_hint}, {uops_5_iw_p1_bypass_hint}, {uops_4_iw_p1_bypass_hint}, {uops_3_iw_p1_bypass_hint}, {uops_2_iw_p1_bypass_hint}, {uops_1_iw_p1_bypass_hint}, {uops_0_iw_p1_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_bypass_hint = _GEN_26[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_27 = {{uops_0_iw_p2_bypass_hint}, {uops_14_iw_p2_bypass_hint}, {uops_13_iw_p2_bypass_hint}, {uops_12_iw_p2_bypass_hint}, {uops_11_iw_p2_bypass_hint}, {uops_10_iw_p2_bypass_hint}, {uops_9_iw_p2_bypass_hint}, {uops_8_iw_p2_bypass_hint}, {uops_7_iw_p2_bypass_hint}, {uops_6_iw_p2_bypass_hint}, {uops_5_iw_p2_bypass_hint}, {uops_4_iw_p2_bypass_hint}, {uops_3_iw_p2_bypass_hint}, {uops_2_iw_p2_bypass_hint}, {uops_1_iw_p2_bypass_hint}, {uops_0_iw_p2_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_bypass_hint = _GEN_27[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_28 = {{uops_0_iw_p3_bypass_hint}, {uops_14_iw_p3_bypass_hint}, {uops_13_iw_p3_bypass_hint}, {uops_12_iw_p3_bypass_hint}, {uops_11_iw_p3_bypass_hint}, {uops_10_iw_p3_bypass_hint}, {uops_9_iw_p3_bypass_hint}, {uops_8_iw_p3_bypass_hint}, {uops_7_iw_p3_bypass_hint}, {uops_6_iw_p3_bypass_hint}, {uops_5_iw_p3_bypass_hint}, {uops_4_iw_p3_bypass_hint}, {uops_3_iw_p3_bypass_hint}, {uops_2_iw_p3_bypass_hint}, {uops_1_iw_p3_bypass_hint}, {uops_0_iw_p3_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p3_bypass_hint = _GEN_28[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_29 = {{uops_0_dis_col_sel}, {uops_14_dis_col_sel}, {uops_13_dis_col_sel}, {uops_12_dis_col_sel}, {uops_11_dis_col_sel}, {uops_10_dis_col_sel}, {uops_9_dis_col_sel}, {uops_8_dis_col_sel}, {uops_7_dis_col_sel}, {uops_6_dis_col_sel}, {uops_5_dis_col_sel}, {uops_4_dis_col_sel}, {uops_3_dis_col_sel}, {uops_2_dis_col_sel}, {uops_1_dis_col_sel}, {uops_0_dis_col_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_dis_col_sel = _GEN_29[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_30 = {{uops_0_br_mask}, {uops_14_br_mask}, {uops_13_br_mask}, {uops_12_br_mask}, {uops_11_br_mask}, {uops_10_br_mask}, {uops_9_br_mask}, {uops_8_br_mask}, {uops_7_br_mask}, {uops_6_br_mask}, {uops_5_br_mask}, {uops_4_br_mask}, {uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_mask = _GEN_30[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_31 = {{uops_0_br_tag}, {uops_14_br_tag}, {uops_13_br_tag}, {uops_12_br_tag}, {uops_11_br_tag}, {uops_10_br_tag}, {uops_9_br_tag}, {uops_8_br_tag}, {uops_7_br_tag}, {uops_6_br_tag}, {uops_5_br_tag}, {uops_4_br_tag}, {uops_3_br_tag}, {uops_2_br_tag}, {uops_1_br_tag}, {uops_0_br_tag}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_tag = _GEN_31[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_32 = {{uops_0_br_type}, {uops_14_br_type}, {uops_13_br_type}, {uops_12_br_type}, {uops_11_br_type}, {uops_10_br_type}, {uops_9_br_type}, {uops_8_br_type}, {uops_7_br_type}, {uops_6_br_type}, {uops_5_br_type}, {uops_4_br_type}, {uops_3_br_type}, {uops_2_br_type}, {uops_1_br_type}, {uops_0_br_type}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_type = _GEN_32[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_33 = {{uops_0_is_sfb}, {uops_14_is_sfb}, {uops_13_is_sfb}, {uops_12_is_sfb}, {uops_11_is_sfb}, {uops_10_is_sfb}, {uops_9_is_sfb}, {uops_8_is_sfb}, {uops_7_is_sfb}, {uops_6_is_sfb}, {uops_5_is_sfb}, {uops_4_is_sfb}, {uops_3_is_sfb}, {uops_2_is_sfb}, {uops_1_is_sfb}, {uops_0_is_sfb}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfb = _GEN_33[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_34 = {{uops_0_is_fence}, {uops_14_is_fence}, {uops_13_is_fence}, {uops_12_is_fence}, {uops_11_is_fence}, {uops_10_is_fence}, {uops_9_is_fence}, {uops_8_is_fence}, {uops_7_is_fence}, {uops_6_is_fence}, {uops_5_is_fence}, {uops_4_is_fence}, {uops_3_is_fence}, {uops_2_is_fence}, {uops_1_is_fence}, {uops_0_is_fence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fence = _GEN_34[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_35 = {{uops_0_is_fencei}, {uops_14_is_fencei}, {uops_13_is_fencei}, {uops_12_is_fencei}, {uops_11_is_fencei}, {uops_10_is_fencei}, {uops_9_is_fencei}, {uops_8_is_fencei}, {uops_7_is_fencei}, {uops_6_is_fencei}, {uops_5_is_fencei}, {uops_4_is_fencei}, {uops_3_is_fencei}, {uops_2_is_fencei}, {uops_1_is_fencei}, {uops_0_is_fencei}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fencei = _GEN_35[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_36 = {{uops_0_is_sfence}, {uops_14_is_sfence}, {uops_13_is_sfence}, {uops_12_is_sfence}, {uops_11_is_sfence}, {uops_10_is_sfence}, {uops_9_is_sfence}, {uops_8_is_sfence}, {uops_7_is_sfence}, {uops_6_is_sfence}, {uops_5_is_sfence}, {uops_4_is_sfence}, {uops_3_is_sfence}, {uops_2_is_sfence}, {uops_1_is_sfence}, {uops_0_is_sfence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfence = _GEN_36[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_37 = {{uops_0_is_amo}, {uops_14_is_amo}, {uops_13_is_amo}, {uops_12_is_amo}, {uops_11_is_amo}, {uops_10_is_amo}, {uops_9_is_amo}, {uops_8_is_amo}, {uops_7_is_amo}, {uops_6_is_amo}, {uops_5_is_amo}, {uops_4_is_amo}, {uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_amo = _GEN_37[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_38 = {{uops_0_is_eret}, {uops_14_is_eret}, {uops_13_is_eret}, {uops_12_is_eret}, {uops_11_is_eret}, {uops_10_is_eret}, {uops_9_is_eret}, {uops_8_is_eret}, {uops_7_is_eret}, {uops_6_is_eret}, {uops_5_is_eret}, {uops_4_is_eret}, {uops_3_is_eret}, {uops_2_is_eret}, {uops_1_is_eret}, {uops_0_is_eret}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_eret = _GEN_38[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_39 = {{uops_0_is_sys_pc2epc}, {uops_14_is_sys_pc2epc}, {uops_13_is_sys_pc2epc}, {uops_12_is_sys_pc2epc}, {uops_11_is_sys_pc2epc}, {uops_10_is_sys_pc2epc}, {uops_9_is_sys_pc2epc}, {uops_8_is_sys_pc2epc}, {uops_7_is_sys_pc2epc}, {uops_6_is_sys_pc2epc}, {uops_5_is_sys_pc2epc}, {uops_4_is_sys_pc2epc}, {uops_3_is_sys_pc2epc}, {uops_2_is_sys_pc2epc}, {uops_1_is_sys_pc2epc}, {uops_0_is_sys_pc2epc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sys_pc2epc = _GEN_39[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_40 = {{uops_0_is_rocc}, {uops_14_is_rocc}, {uops_13_is_rocc}, {uops_12_is_rocc}, {uops_11_is_rocc}, {uops_10_is_rocc}, {uops_9_is_rocc}, {uops_8_is_rocc}, {uops_7_is_rocc}, {uops_6_is_rocc}, {uops_5_is_rocc}, {uops_4_is_rocc}, {uops_3_is_rocc}, {uops_2_is_rocc}, {uops_1_is_rocc}, {uops_0_is_rocc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rocc = _GEN_40[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_41 = {{uops_0_is_mov}, {uops_14_is_mov}, {uops_13_is_mov}, {uops_12_is_mov}, {uops_11_is_mov}, {uops_10_is_mov}, {uops_9_is_mov}, {uops_8_is_mov}, {uops_7_is_mov}, {uops_6_is_mov}, {uops_5_is_mov}, {uops_4_is_mov}, {uops_3_is_mov}, {uops_2_is_mov}, {uops_1_is_mov}, {uops_0_is_mov}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_mov = _GEN_41[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_42 = {{uops_0_ftq_idx}, {uops_14_ftq_idx}, {uops_13_ftq_idx}, {uops_12_ftq_idx}, {uops_11_ftq_idx}, {uops_10_ftq_idx}, {uops_9_ftq_idx}, {uops_8_ftq_idx}, {uops_7_ftq_idx}, {uops_6_ftq_idx}, {uops_5_ftq_idx}, {uops_4_ftq_idx}, {uops_3_ftq_idx}, {uops_2_ftq_idx}, {uops_1_ftq_idx}, {uops_0_ftq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ftq_idx = _GEN_42[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_43 = {{uops_0_edge_inst}, {uops_14_edge_inst}, {uops_13_edge_inst}, {uops_12_edge_inst}, {uops_11_edge_inst}, {uops_10_edge_inst}, {uops_9_edge_inst}, {uops_8_edge_inst}, {uops_7_edge_inst}, {uops_6_edge_inst}, {uops_5_edge_inst}, {uops_4_edge_inst}, {uops_3_edge_inst}, {uops_2_edge_inst}, {uops_1_edge_inst}, {uops_0_edge_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_edge_inst = _GEN_43[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_44 = {{uops_0_pc_lob}, {uops_14_pc_lob}, {uops_13_pc_lob}, {uops_12_pc_lob}, {uops_11_pc_lob}, {uops_10_pc_lob}, {uops_9_pc_lob}, {uops_8_pc_lob}, {uops_7_pc_lob}, {uops_6_pc_lob}, {uops_5_pc_lob}, {uops_4_pc_lob}, {uops_3_pc_lob}, {uops_2_pc_lob}, {uops_1_pc_lob}, {uops_0_pc_lob}}; // @[util.scala:505:22, :547:21]
assign out_uop_pc_lob = _GEN_44[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_45 = {{uops_0_taken}, {uops_14_taken}, {uops_13_taken}, {uops_12_taken}, {uops_11_taken}, {uops_10_taken}, {uops_9_taken}, {uops_8_taken}, {uops_7_taken}, {uops_6_taken}, {uops_5_taken}, {uops_4_taken}, {uops_3_taken}, {uops_2_taken}, {uops_1_taken}, {uops_0_taken}}; // @[util.scala:505:22, :547:21]
assign out_uop_taken = _GEN_45[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_46 = {{uops_0_imm_rename}, {uops_14_imm_rename}, {uops_13_imm_rename}, {uops_12_imm_rename}, {uops_11_imm_rename}, {uops_10_imm_rename}, {uops_9_imm_rename}, {uops_8_imm_rename}, {uops_7_imm_rename}, {uops_6_imm_rename}, {uops_5_imm_rename}, {uops_4_imm_rename}, {uops_3_imm_rename}, {uops_2_imm_rename}, {uops_1_imm_rename}, {uops_0_imm_rename}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_rename = _GEN_46[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_47 = {{uops_0_imm_sel}, {uops_14_imm_sel}, {uops_13_imm_sel}, {uops_12_imm_sel}, {uops_11_imm_sel}, {uops_10_imm_sel}, {uops_9_imm_sel}, {uops_8_imm_sel}, {uops_7_imm_sel}, {uops_6_imm_sel}, {uops_5_imm_sel}, {uops_4_imm_sel}, {uops_3_imm_sel}, {uops_2_imm_sel}, {uops_1_imm_sel}, {uops_0_imm_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_sel = _GEN_47[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_48 = {{uops_0_pimm}, {uops_14_pimm}, {uops_13_pimm}, {uops_12_pimm}, {uops_11_pimm}, {uops_10_pimm}, {uops_9_pimm}, {uops_8_pimm}, {uops_7_pimm}, {uops_6_pimm}, {uops_5_pimm}, {uops_4_pimm}, {uops_3_pimm}, {uops_2_pimm}, {uops_1_pimm}, {uops_0_pimm}}; // @[util.scala:505:22, :547:21]
assign out_uop_pimm = _GEN_48[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][19:0] _GEN_49 = {{uops_0_imm_packed}, {uops_14_imm_packed}, {uops_13_imm_packed}, {uops_12_imm_packed}, {uops_11_imm_packed}, {uops_10_imm_packed}, {uops_9_imm_packed}, {uops_8_imm_packed}, {uops_7_imm_packed}, {uops_6_imm_packed}, {uops_5_imm_packed}, {uops_4_imm_packed}, {uops_3_imm_packed}, {uops_2_imm_packed}, {uops_1_imm_packed}, {uops_0_imm_packed}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_packed = _GEN_49[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_50 = {{uops_0_op1_sel}, {uops_14_op1_sel}, {uops_13_op1_sel}, {uops_12_op1_sel}, {uops_11_op1_sel}, {uops_10_op1_sel}, {uops_9_op1_sel}, {uops_8_op1_sel}, {uops_7_op1_sel}, {uops_6_op1_sel}, {uops_5_op1_sel}, {uops_4_op1_sel}, {uops_3_op1_sel}, {uops_2_op1_sel}, {uops_1_op1_sel}, {uops_0_op1_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op1_sel = _GEN_50[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_51 = {{uops_0_op2_sel}, {uops_14_op2_sel}, {uops_13_op2_sel}, {uops_12_op2_sel}, {uops_11_op2_sel}, {uops_10_op2_sel}, {uops_9_op2_sel}, {uops_8_op2_sel}, {uops_7_op2_sel}, {uops_6_op2_sel}, {uops_5_op2_sel}, {uops_4_op2_sel}, {uops_3_op2_sel}, {uops_2_op2_sel}, {uops_1_op2_sel}, {uops_0_op2_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op2_sel = _GEN_51[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_52 = {{uops_0_fp_ctrl_ldst}, {uops_14_fp_ctrl_ldst}, {uops_13_fp_ctrl_ldst}, {uops_12_fp_ctrl_ldst}, {uops_11_fp_ctrl_ldst}, {uops_10_fp_ctrl_ldst}, {uops_9_fp_ctrl_ldst}, {uops_8_fp_ctrl_ldst}, {uops_7_fp_ctrl_ldst}, {uops_6_fp_ctrl_ldst}, {uops_5_fp_ctrl_ldst}, {uops_4_fp_ctrl_ldst}, {uops_3_fp_ctrl_ldst}, {uops_2_fp_ctrl_ldst}, {uops_1_fp_ctrl_ldst}, {uops_0_fp_ctrl_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ldst = _GEN_52[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_53 = {{uops_0_fp_ctrl_wen}, {uops_14_fp_ctrl_wen}, {uops_13_fp_ctrl_wen}, {uops_12_fp_ctrl_wen}, {uops_11_fp_ctrl_wen}, {uops_10_fp_ctrl_wen}, {uops_9_fp_ctrl_wen}, {uops_8_fp_ctrl_wen}, {uops_7_fp_ctrl_wen}, {uops_6_fp_ctrl_wen}, {uops_5_fp_ctrl_wen}, {uops_4_fp_ctrl_wen}, {uops_3_fp_ctrl_wen}, {uops_2_fp_ctrl_wen}, {uops_1_fp_ctrl_wen}, {uops_0_fp_ctrl_wen}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wen = _GEN_53[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_54 = {{uops_0_fp_ctrl_ren1}, {uops_14_fp_ctrl_ren1}, {uops_13_fp_ctrl_ren1}, {uops_12_fp_ctrl_ren1}, {uops_11_fp_ctrl_ren1}, {uops_10_fp_ctrl_ren1}, {uops_9_fp_ctrl_ren1}, {uops_8_fp_ctrl_ren1}, {uops_7_fp_ctrl_ren1}, {uops_6_fp_ctrl_ren1}, {uops_5_fp_ctrl_ren1}, {uops_4_fp_ctrl_ren1}, {uops_3_fp_ctrl_ren1}, {uops_2_fp_ctrl_ren1}, {uops_1_fp_ctrl_ren1}, {uops_0_fp_ctrl_ren1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren1 = _GEN_54[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_55 = {{uops_0_fp_ctrl_ren2}, {uops_14_fp_ctrl_ren2}, {uops_13_fp_ctrl_ren2}, {uops_12_fp_ctrl_ren2}, {uops_11_fp_ctrl_ren2}, {uops_10_fp_ctrl_ren2}, {uops_9_fp_ctrl_ren2}, {uops_8_fp_ctrl_ren2}, {uops_7_fp_ctrl_ren2}, {uops_6_fp_ctrl_ren2}, {uops_5_fp_ctrl_ren2}, {uops_4_fp_ctrl_ren2}, {uops_3_fp_ctrl_ren2}, {uops_2_fp_ctrl_ren2}, {uops_1_fp_ctrl_ren2}, {uops_0_fp_ctrl_ren2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren2 = _GEN_55[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_56 = {{uops_0_fp_ctrl_ren3}, {uops_14_fp_ctrl_ren3}, {uops_13_fp_ctrl_ren3}, {uops_12_fp_ctrl_ren3}, {uops_11_fp_ctrl_ren3}, {uops_10_fp_ctrl_ren3}, {uops_9_fp_ctrl_ren3}, {uops_8_fp_ctrl_ren3}, {uops_7_fp_ctrl_ren3}, {uops_6_fp_ctrl_ren3}, {uops_5_fp_ctrl_ren3}, {uops_4_fp_ctrl_ren3}, {uops_3_fp_ctrl_ren3}, {uops_2_fp_ctrl_ren3}, {uops_1_fp_ctrl_ren3}, {uops_0_fp_ctrl_ren3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren3 = _GEN_56[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_57 = {{uops_0_fp_ctrl_swap12}, {uops_14_fp_ctrl_swap12}, {uops_13_fp_ctrl_swap12}, {uops_12_fp_ctrl_swap12}, {uops_11_fp_ctrl_swap12}, {uops_10_fp_ctrl_swap12}, {uops_9_fp_ctrl_swap12}, {uops_8_fp_ctrl_swap12}, {uops_7_fp_ctrl_swap12}, {uops_6_fp_ctrl_swap12}, {uops_5_fp_ctrl_swap12}, {uops_4_fp_ctrl_swap12}, {uops_3_fp_ctrl_swap12}, {uops_2_fp_ctrl_swap12}, {uops_1_fp_ctrl_swap12}, {uops_0_fp_ctrl_swap12}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap12 = _GEN_57[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_58 = {{uops_0_fp_ctrl_swap23}, {uops_14_fp_ctrl_swap23}, {uops_13_fp_ctrl_swap23}, {uops_12_fp_ctrl_swap23}, {uops_11_fp_ctrl_swap23}, {uops_10_fp_ctrl_swap23}, {uops_9_fp_ctrl_swap23}, {uops_8_fp_ctrl_swap23}, {uops_7_fp_ctrl_swap23}, {uops_6_fp_ctrl_swap23}, {uops_5_fp_ctrl_swap23}, {uops_4_fp_ctrl_swap23}, {uops_3_fp_ctrl_swap23}, {uops_2_fp_ctrl_swap23}, {uops_1_fp_ctrl_swap23}, {uops_0_fp_ctrl_swap23}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap23 = _GEN_58[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_59 = {{uops_0_fp_ctrl_typeTagIn}, {uops_14_fp_ctrl_typeTagIn}, {uops_13_fp_ctrl_typeTagIn}, {uops_12_fp_ctrl_typeTagIn}, {uops_11_fp_ctrl_typeTagIn}, {uops_10_fp_ctrl_typeTagIn}, {uops_9_fp_ctrl_typeTagIn}, {uops_8_fp_ctrl_typeTagIn}, {uops_7_fp_ctrl_typeTagIn}, {uops_6_fp_ctrl_typeTagIn}, {uops_5_fp_ctrl_typeTagIn}, {uops_4_fp_ctrl_typeTagIn}, {uops_3_fp_ctrl_typeTagIn}, {uops_2_fp_ctrl_typeTagIn}, {uops_1_fp_ctrl_typeTagIn}, {uops_0_fp_ctrl_typeTagIn}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagIn = _GEN_59[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_60 = {{uops_0_fp_ctrl_typeTagOut}, {uops_14_fp_ctrl_typeTagOut}, {uops_13_fp_ctrl_typeTagOut}, {uops_12_fp_ctrl_typeTagOut}, {uops_11_fp_ctrl_typeTagOut}, {uops_10_fp_ctrl_typeTagOut}, {uops_9_fp_ctrl_typeTagOut}, {uops_8_fp_ctrl_typeTagOut}, {uops_7_fp_ctrl_typeTagOut}, {uops_6_fp_ctrl_typeTagOut}, {uops_5_fp_ctrl_typeTagOut}, {uops_4_fp_ctrl_typeTagOut}, {uops_3_fp_ctrl_typeTagOut}, {uops_2_fp_ctrl_typeTagOut}, {uops_1_fp_ctrl_typeTagOut}, {uops_0_fp_ctrl_typeTagOut}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagOut = _GEN_60[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_61 = {{uops_0_fp_ctrl_fromint}, {uops_14_fp_ctrl_fromint}, {uops_13_fp_ctrl_fromint}, {uops_12_fp_ctrl_fromint}, {uops_11_fp_ctrl_fromint}, {uops_10_fp_ctrl_fromint}, {uops_9_fp_ctrl_fromint}, {uops_8_fp_ctrl_fromint}, {uops_7_fp_ctrl_fromint}, {uops_6_fp_ctrl_fromint}, {uops_5_fp_ctrl_fromint}, {uops_4_fp_ctrl_fromint}, {uops_3_fp_ctrl_fromint}, {uops_2_fp_ctrl_fromint}, {uops_1_fp_ctrl_fromint}, {uops_0_fp_ctrl_fromint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fromint = _GEN_61[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_62 = {{uops_0_fp_ctrl_toint}, {uops_14_fp_ctrl_toint}, {uops_13_fp_ctrl_toint}, {uops_12_fp_ctrl_toint}, {uops_11_fp_ctrl_toint}, {uops_10_fp_ctrl_toint}, {uops_9_fp_ctrl_toint}, {uops_8_fp_ctrl_toint}, {uops_7_fp_ctrl_toint}, {uops_6_fp_ctrl_toint}, {uops_5_fp_ctrl_toint}, {uops_4_fp_ctrl_toint}, {uops_3_fp_ctrl_toint}, {uops_2_fp_ctrl_toint}, {uops_1_fp_ctrl_toint}, {uops_0_fp_ctrl_toint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_toint = _GEN_62[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_63 = {{uops_0_fp_ctrl_fastpipe}, {uops_14_fp_ctrl_fastpipe}, {uops_13_fp_ctrl_fastpipe}, {uops_12_fp_ctrl_fastpipe}, {uops_11_fp_ctrl_fastpipe}, {uops_10_fp_ctrl_fastpipe}, {uops_9_fp_ctrl_fastpipe}, {uops_8_fp_ctrl_fastpipe}, {uops_7_fp_ctrl_fastpipe}, {uops_6_fp_ctrl_fastpipe}, {uops_5_fp_ctrl_fastpipe}, {uops_4_fp_ctrl_fastpipe}, {uops_3_fp_ctrl_fastpipe}, {uops_2_fp_ctrl_fastpipe}, {uops_1_fp_ctrl_fastpipe}, {uops_0_fp_ctrl_fastpipe}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fastpipe = _GEN_63[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_64 = {{uops_0_fp_ctrl_fma}, {uops_14_fp_ctrl_fma}, {uops_13_fp_ctrl_fma}, {uops_12_fp_ctrl_fma}, {uops_11_fp_ctrl_fma}, {uops_10_fp_ctrl_fma}, {uops_9_fp_ctrl_fma}, {uops_8_fp_ctrl_fma}, {uops_7_fp_ctrl_fma}, {uops_6_fp_ctrl_fma}, {uops_5_fp_ctrl_fma}, {uops_4_fp_ctrl_fma}, {uops_3_fp_ctrl_fma}, {uops_2_fp_ctrl_fma}, {uops_1_fp_ctrl_fma}, {uops_0_fp_ctrl_fma}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fma = _GEN_64[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_65 = {{uops_0_fp_ctrl_div}, {uops_14_fp_ctrl_div}, {uops_13_fp_ctrl_div}, {uops_12_fp_ctrl_div}, {uops_11_fp_ctrl_div}, {uops_10_fp_ctrl_div}, {uops_9_fp_ctrl_div}, {uops_8_fp_ctrl_div}, {uops_7_fp_ctrl_div}, {uops_6_fp_ctrl_div}, {uops_5_fp_ctrl_div}, {uops_4_fp_ctrl_div}, {uops_3_fp_ctrl_div}, {uops_2_fp_ctrl_div}, {uops_1_fp_ctrl_div}, {uops_0_fp_ctrl_div}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_div = _GEN_65[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_66 = {{uops_0_fp_ctrl_sqrt}, {uops_14_fp_ctrl_sqrt}, {uops_13_fp_ctrl_sqrt}, {uops_12_fp_ctrl_sqrt}, {uops_11_fp_ctrl_sqrt}, {uops_10_fp_ctrl_sqrt}, {uops_9_fp_ctrl_sqrt}, {uops_8_fp_ctrl_sqrt}, {uops_7_fp_ctrl_sqrt}, {uops_6_fp_ctrl_sqrt}, {uops_5_fp_ctrl_sqrt}, {uops_4_fp_ctrl_sqrt}, {uops_3_fp_ctrl_sqrt}, {uops_2_fp_ctrl_sqrt}, {uops_1_fp_ctrl_sqrt}, {uops_0_fp_ctrl_sqrt}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_sqrt = _GEN_66[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_67 = {{uops_0_fp_ctrl_wflags}, {uops_14_fp_ctrl_wflags}, {uops_13_fp_ctrl_wflags}, {uops_12_fp_ctrl_wflags}, {uops_11_fp_ctrl_wflags}, {uops_10_fp_ctrl_wflags}, {uops_9_fp_ctrl_wflags}, {uops_8_fp_ctrl_wflags}, {uops_7_fp_ctrl_wflags}, {uops_6_fp_ctrl_wflags}, {uops_5_fp_ctrl_wflags}, {uops_4_fp_ctrl_wflags}, {uops_3_fp_ctrl_wflags}, {uops_2_fp_ctrl_wflags}, {uops_1_fp_ctrl_wflags}, {uops_0_fp_ctrl_wflags}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wflags = _GEN_67[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_68 = {{uops_0_fp_ctrl_vec}, {uops_14_fp_ctrl_vec}, {uops_13_fp_ctrl_vec}, {uops_12_fp_ctrl_vec}, {uops_11_fp_ctrl_vec}, {uops_10_fp_ctrl_vec}, {uops_9_fp_ctrl_vec}, {uops_8_fp_ctrl_vec}, {uops_7_fp_ctrl_vec}, {uops_6_fp_ctrl_vec}, {uops_5_fp_ctrl_vec}, {uops_4_fp_ctrl_vec}, {uops_3_fp_ctrl_vec}, {uops_2_fp_ctrl_vec}, {uops_1_fp_ctrl_vec}, {uops_0_fp_ctrl_vec}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_vec = _GEN_68[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_69 = {{uops_0_rob_idx}, {uops_14_rob_idx}, {uops_13_rob_idx}, {uops_12_rob_idx}, {uops_11_rob_idx}, {uops_10_rob_idx}, {uops_9_rob_idx}, {uops_8_rob_idx}, {uops_7_rob_idx}, {uops_6_rob_idx}, {uops_5_rob_idx}, {uops_4_rob_idx}, {uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rob_idx = _GEN_69[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_70 = {{uops_0_ldq_idx}, {uops_14_ldq_idx}, {uops_13_ldq_idx}, {uops_12_ldq_idx}, {uops_11_ldq_idx}, {uops_10_ldq_idx}, {uops_9_ldq_idx}, {uops_8_ldq_idx}, {uops_7_ldq_idx}, {uops_6_ldq_idx}, {uops_5_ldq_idx}, {uops_4_ldq_idx}, {uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldq_idx = _GEN_70[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_71 = {{uops_0_stq_idx}, {uops_14_stq_idx}, {uops_13_stq_idx}, {uops_12_stq_idx}, {uops_11_stq_idx}, {uops_10_stq_idx}, {uops_9_stq_idx}, {uops_8_stq_idx}, {uops_7_stq_idx}, {uops_6_stq_idx}, {uops_5_stq_idx}, {uops_4_stq_idx}, {uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_stq_idx = _GEN_71[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_72 = {{uops_0_rxq_idx}, {uops_14_rxq_idx}, {uops_13_rxq_idx}, {uops_12_rxq_idx}, {uops_11_rxq_idx}, {uops_10_rxq_idx}, {uops_9_rxq_idx}, {uops_8_rxq_idx}, {uops_7_rxq_idx}, {uops_6_rxq_idx}, {uops_5_rxq_idx}, {uops_4_rxq_idx}, {uops_3_rxq_idx}, {uops_2_rxq_idx}, {uops_1_rxq_idx}, {uops_0_rxq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rxq_idx = _GEN_72[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_73 = {{uops_0_pdst}, {uops_14_pdst}, {uops_13_pdst}, {uops_12_pdst}, {uops_11_pdst}, {uops_10_pdst}, {uops_9_pdst}, {uops_8_pdst}, {uops_7_pdst}, {uops_6_pdst}, {uops_5_pdst}, {uops_4_pdst}, {uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_pdst = _GEN_73[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_74 = {{uops_0_prs1}, {uops_14_prs1}, {uops_13_prs1}, {uops_12_prs1}, {uops_11_prs1}, {uops_10_prs1}, {uops_9_prs1}, {uops_8_prs1}, {uops_7_prs1}, {uops_6_prs1}, {uops_5_prs1}, {uops_4_prs1}, {uops_3_prs1}, {uops_2_prs1}, {uops_1_prs1}, {uops_0_prs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1 = _GEN_74[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_75 = {{uops_0_prs2}, {uops_14_prs2}, {uops_13_prs2}, {uops_12_prs2}, {uops_11_prs2}, {uops_10_prs2}, {uops_9_prs2}, {uops_8_prs2}, {uops_7_prs2}, {uops_6_prs2}, {uops_5_prs2}, {uops_4_prs2}, {uops_3_prs2}, {uops_2_prs2}, {uops_1_prs2}, {uops_0_prs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2 = _GEN_75[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_76 = {{uops_0_prs3}, {uops_14_prs3}, {uops_13_prs3}, {uops_12_prs3}, {uops_11_prs3}, {uops_10_prs3}, {uops_9_prs3}, {uops_8_prs3}, {uops_7_prs3}, {uops_6_prs3}, {uops_5_prs3}, {uops_4_prs3}, {uops_3_prs3}, {uops_2_prs3}, {uops_1_prs3}, {uops_0_prs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3 = _GEN_76[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_77 = {{uops_0_ppred}, {uops_14_ppred}, {uops_13_ppred}, {uops_12_ppred}, {uops_11_ppred}, {uops_10_ppred}, {uops_9_ppred}, {uops_8_ppred}, {uops_7_ppred}, {uops_6_ppred}, {uops_5_ppred}, {uops_4_ppred}, {uops_3_ppred}, {uops_2_ppred}, {uops_1_ppred}, {uops_0_ppred}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred = _GEN_77[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_78 = {{uops_0_prs1_busy}, {uops_14_prs1_busy}, {uops_13_prs1_busy}, {uops_12_prs1_busy}, {uops_11_prs1_busy}, {uops_10_prs1_busy}, {uops_9_prs1_busy}, {uops_8_prs1_busy}, {uops_7_prs1_busy}, {uops_6_prs1_busy}, {uops_5_prs1_busy}, {uops_4_prs1_busy}, {uops_3_prs1_busy}, {uops_2_prs1_busy}, {uops_1_prs1_busy}, {uops_0_prs1_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1_busy = _GEN_78[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_79 = {{uops_0_prs2_busy}, {uops_14_prs2_busy}, {uops_13_prs2_busy}, {uops_12_prs2_busy}, {uops_11_prs2_busy}, {uops_10_prs2_busy}, {uops_9_prs2_busy}, {uops_8_prs2_busy}, {uops_7_prs2_busy}, {uops_6_prs2_busy}, {uops_5_prs2_busy}, {uops_4_prs2_busy}, {uops_3_prs2_busy}, {uops_2_prs2_busy}, {uops_1_prs2_busy}, {uops_0_prs2_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2_busy = _GEN_79[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_80 = {{uops_0_prs3_busy}, {uops_14_prs3_busy}, {uops_13_prs3_busy}, {uops_12_prs3_busy}, {uops_11_prs3_busy}, {uops_10_prs3_busy}, {uops_9_prs3_busy}, {uops_8_prs3_busy}, {uops_7_prs3_busy}, {uops_6_prs3_busy}, {uops_5_prs3_busy}, {uops_4_prs3_busy}, {uops_3_prs3_busy}, {uops_2_prs3_busy}, {uops_1_prs3_busy}, {uops_0_prs3_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3_busy = _GEN_80[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_81 = {{uops_0_ppred_busy}, {uops_14_ppred_busy}, {uops_13_ppred_busy}, {uops_12_ppred_busy}, {uops_11_ppred_busy}, {uops_10_ppred_busy}, {uops_9_ppred_busy}, {uops_8_ppred_busy}, {uops_7_ppred_busy}, {uops_6_ppred_busy}, {uops_5_ppred_busy}, {uops_4_ppred_busy}, {uops_3_ppred_busy}, {uops_2_ppred_busy}, {uops_1_ppred_busy}, {uops_0_ppred_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred_busy = _GEN_81[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_82 = {{uops_0_stale_pdst}, {uops_14_stale_pdst}, {uops_13_stale_pdst}, {uops_12_stale_pdst}, {uops_11_stale_pdst}, {uops_10_stale_pdst}, {uops_9_stale_pdst}, {uops_8_stale_pdst}, {uops_7_stale_pdst}, {uops_6_stale_pdst}, {uops_5_stale_pdst}, {uops_4_stale_pdst}, {uops_3_stale_pdst}, {uops_2_stale_pdst}, {uops_1_stale_pdst}, {uops_0_stale_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_stale_pdst = _GEN_82[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_83 = {{uops_0_exception}, {uops_14_exception}, {uops_13_exception}, {uops_12_exception}, {uops_11_exception}, {uops_10_exception}, {uops_9_exception}, {uops_8_exception}, {uops_7_exception}, {uops_6_exception}, {uops_5_exception}, {uops_4_exception}, {uops_3_exception}, {uops_2_exception}, {uops_1_exception}, {uops_0_exception}}; // @[util.scala:505:22, :547:21]
assign out_uop_exception = _GEN_83[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][63:0] _GEN_84 = {{uops_0_exc_cause}, {uops_14_exc_cause}, {uops_13_exc_cause}, {uops_12_exc_cause}, {uops_11_exc_cause}, {uops_10_exc_cause}, {uops_9_exc_cause}, {uops_8_exc_cause}, {uops_7_exc_cause}, {uops_6_exc_cause}, {uops_5_exc_cause}, {uops_4_exc_cause}, {uops_3_exc_cause}, {uops_2_exc_cause}, {uops_1_exc_cause}, {uops_0_exc_cause}}; // @[util.scala:505:22, :547:21]
assign out_uop_exc_cause = _GEN_84[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_85 = {{uops_0_mem_cmd}, {uops_14_mem_cmd}, {uops_13_mem_cmd}, {uops_12_mem_cmd}, {uops_11_mem_cmd}, {uops_10_mem_cmd}, {uops_9_mem_cmd}, {uops_8_mem_cmd}, {uops_7_mem_cmd}, {uops_6_mem_cmd}, {uops_5_mem_cmd}, {uops_4_mem_cmd}, {uops_3_mem_cmd}, {uops_2_mem_cmd}, {uops_1_mem_cmd}, {uops_0_mem_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_cmd = _GEN_85[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_86 = {{uops_0_mem_size}, {uops_14_mem_size}, {uops_13_mem_size}, {uops_12_mem_size}, {uops_11_mem_size}, {uops_10_mem_size}, {uops_9_mem_size}, {uops_8_mem_size}, {uops_7_mem_size}, {uops_6_mem_size}, {uops_5_mem_size}, {uops_4_mem_size}, {uops_3_mem_size}, {uops_2_mem_size}, {uops_1_mem_size}, {uops_0_mem_size}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_size = _GEN_86[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_87 = {{uops_0_mem_signed}, {uops_14_mem_signed}, {uops_13_mem_signed}, {uops_12_mem_signed}, {uops_11_mem_signed}, {uops_10_mem_signed}, {uops_9_mem_signed}, {uops_8_mem_signed}, {uops_7_mem_signed}, {uops_6_mem_signed}, {uops_5_mem_signed}, {uops_4_mem_signed}, {uops_3_mem_signed}, {uops_2_mem_signed}, {uops_1_mem_signed}, {uops_0_mem_signed}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_signed = _GEN_87[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_88 = {{uops_0_uses_ldq}, {uops_14_uses_ldq}, {uops_13_uses_ldq}, {uops_12_uses_ldq}, {uops_11_uses_ldq}, {uops_10_uses_ldq}, {uops_9_uses_ldq}, {uops_8_uses_ldq}, {uops_7_uses_ldq}, {uops_6_uses_ldq}, {uops_5_uses_ldq}, {uops_4_uses_ldq}, {uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_ldq = _GEN_88[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_89 = {{uops_0_uses_stq}, {uops_14_uses_stq}, {uops_13_uses_stq}, {uops_12_uses_stq}, {uops_11_uses_stq}, {uops_10_uses_stq}, {uops_9_uses_stq}, {uops_8_uses_stq}, {uops_7_uses_stq}, {uops_6_uses_stq}, {uops_5_uses_stq}, {uops_4_uses_stq}, {uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_stq = _GEN_89[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_90 = {{uops_0_is_unique}, {uops_14_is_unique}, {uops_13_is_unique}, {uops_12_is_unique}, {uops_11_is_unique}, {uops_10_is_unique}, {uops_9_is_unique}, {uops_8_is_unique}, {uops_7_is_unique}, {uops_6_is_unique}, {uops_5_is_unique}, {uops_4_is_unique}, {uops_3_is_unique}, {uops_2_is_unique}, {uops_1_is_unique}, {uops_0_is_unique}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_unique = _GEN_90[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_91 = {{uops_0_flush_on_commit}, {uops_14_flush_on_commit}, {uops_13_flush_on_commit}, {uops_12_flush_on_commit}, {uops_11_flush_on_commit}, {uops_10_flush_on_commit}, {uops_9_flush_on_commit}, {uops_8_flush_on_commit}, {uops_7_flush_on_commit}, {uops_6_flush_on_commit}, {uops_5_flush_on_commit}, {uops_4_flush_on_commit}, {uops_3_flush_on_commit}, {uops_2_flush_on_commit}, {uops_1_flush_on_commit}, {uops_0_flush_on_commit}}; // @[util.scala:505:22, :547:21]
assign out_uop_flush_on_commit = _GEN_91[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_92 = {{uops_0_csr_cmd}, {uops_14_csr_cmd}, {uops_13_csr_cmd}, {uops_12_csr_cmd}, {uops_11_csr_cmd}, {uops_10_csr_cmd}, {uops_9_csr_cmd}, {uops_8_csr_cmd}, {uops_7_csr_cmd}, {uops_6_csr_cmd}, {uops_5_csr_cmd}, {uops_4_csr_cmd}, {uops_3_csr_cmd}, {uops_2_csr_cmd}, {uops_1_csr_cmd}, {uops_0_csr_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_csr_cmd = _GEN_92[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_93 = {{uops_0_ldst_is_rs1}, {uops_14_ldst_is_rs1}, {uops_13_ldst_is_rs1}, {uops_12_ldst_is_rs1}, {uops_11_ldst_is_rs1}, {uops_10_ldst_is_rs1}, {uops_9_ldst_is_rs1}, {uops_8_ldst_is_rs1}, {uops_7_ldst_is_rs1}, {uops_6_ldst_is_rs1}, {uops_5_ldst_is_rs1}, {uops_4_ldst_is_rs1}, {uops_3_ldst_is_rs1}, {uops_2_ldst_is_rs1}, {uops_1_ldst_is_rs1}, {uops_0_ldst_is_rs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst_is_rs1 = _GEN_93[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_94 = {{uops_0_ldst}, {uops_14_ldst}, {uops_13_ldst}, {uops_12_ldst}, {uops_11_ldst}, {uops_10_ldst}, {uops_9_ldst}, {uops_8_ldst}, {uops_7_ldst}, {uops_6_ldst}, {uops_5_ldst}, {uops_4_ldst}, {uops_3_ldst}, {uops_2_ldst}, {uops_1_ldst}, {uops_0_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst = _GEN_94[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_95 = {{uops_0_lrs1}, {uops_14_lrs1}, {uops_13_lrs1}, {uops_12_lrs1}, {uops_11_lrs1}, {uops_10_lrs1}, {uops_9_lrs1}, {uops_8_lrs1}, {uops_7_lrs1}, {uops_6_lrs1}, {uops_5_lrs1}, {uops_4_lrs1}, {uops_3_lrs1}, {uops_2_lrs1}, {uops_1_lrs1}, {uops_0_lrs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1 = _GEN_95[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_96 = {{uops_0_lrs2}, {uops_14_lrs2}, {uops_13_lrs2}, {uops_12_lrs2}, {uops_11_lrs2}, {uops_10_lrs2}, {uops_9_lrs2}, {uops_8_lrs2}, {uops_7_lrs2}, {uops_6_lrs2}, {uops_5_lrs2}, {uops_4_lrs2}, {uops_3_lrs2}, {uops_2_lrs2}, {uops_1_lrs2}, {uops_0_lrs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2 = _GEN_96[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_97 = {{uops_0_lrs3}, {uops_14_lrs3}, {uops_13_lrs3}, {uops_12_lrs3}, {uops_11_lrs3}, {uops_10_lrs3}, {uops_9_lrs3}, {uops_8_lrs3}, {uops_7_lrs3}, {uops_6_lrs3}, {uops_5_lrs3}, {uops_4_lrs3}, {uops_3_lrs3}, {uops_2_lrs3}, {uops_1_lrs3}, {uops_0_lrs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs3 = _GEN_97[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_98 = {{uops_0_dst_rtype}, {uops_14_dst_rtype}, {uops_13_dst_rtype}, {uops_12_dst_rtype}, {uops_11_dst_rtype}, {uops_10_dst_rtype}, {uops_9_dst_rtype}, {uops_8_dst_rtype}, {uops_7_dst_rtype}, {uops_6_dst_rtype}, {uops_5_dst_rtype}, {uops_4_dst_rtype}, {uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_dst_rtype = _GEN_98[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_99 = {{uops_0_lrs1_rtype}, {uops_14_lrs1_rtype}, {uops_13_lrs1_rtype}, {uops_12_lrs1_rtype}, {uops_11_lrs1_rtype}, {uops_10_lrs1_rtype}, {uops_9_lrs1_rtype}, {uops_8_lrs1_rtype}, {uops_7_lrs1_rtype}, {uops_6_lrs1_rtype}, {uops_5_lrs1_rtype}, {uops_4_lrs1_rtype}, {uops_3_lrs1_rtype}, {uops_2_lrs1_rtype}, {uops_1_lrs1_rtype}, {uops_0_lrs1_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1_rtype = _GEN_99[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_100 = {{uops_0_lrs2_rtype}, {uops_14_lrs2_rtype}, {uops_13_lrs2_rtype}, {uops_12_lrs2_rtype}, {uops_11_lrs2_rtype}, {uops_10_lrs2_rtype}, {uops_9_lrs2_rtype}, {uops_8_lrs2_rtype}, {uops_7_lrs2_rtype}, {uops_6_lrs2_rtype}, {uops_5_lrs2_rtype}, {uops_4_lrs2_rtype}, {uops_3_lrs2_rtype}, {uops_2_lrs2_rtype}, {uops_1_lrs2_rtype}, {uops_0_lrs2_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2_rtype = _GEN_100[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_101 = {{uops_0_frs3_en}, {uops_14_frs3_en}, {uops_13_frs3_en}, {uops_12_frs3_en}, {uops_11_frs3_en}, {uops_10_frs3_en}, {uops_9_frs3_en}, {uops_8_frs3_en}, {uops_7_frs3_en}, {uops_6_frs3_en}, {uops_5_frs3_en}, {uops_4_frs3_en}, {uops_3_frs3_en}, {uops_2_frs3_en}, {uops_1_frs3_en}, {uops_0_frs3_en}}; // @[util.scala:505:22, :547:21]
assign out_uop_frs3_en = _GEN_101[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_102 = {{uops_0_fcn_dw}, {uops_14_fcn_dw}, {uops_13_fcn_dw}, {uops_12_fcn_dw}, {uops_11_fcn_dw}, {uops_10_fcn_dw}, {uops_9_fcn_dw}, {uops_8_fcn_dw}, {uops_7_fcn_dw}, {uops_6_fcn_dw}, {uops_5_fcn_dw}, {uops_4_fcn_dw}, {uops_3_fcn_dw}, {uops_2_fcn_dw}, {uops_1_fcn_dw}, {uops_0_fcn_dw}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_dw = _GEN_102[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_103 = {{uops_0_fcn_op}, {uops_14_fcn_op}, {uops_13_fcn_op}, {uops_12_fcn_op}, {uops_11_fcn_op}, {uops_10_fcn_op}, {uops_9_fcn_op}, {uops_8_fcn_op}, {uops_7_fcn_op}, {uops_6_fcn_op}, {uops_5_fcn_op}, {uops_4_fcn_op}, {uops_3_fcn_op}, {uops_2_fcn_op}, {uops_1_fcn_op}, {uops_0_fcn_op}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_op = _GEN_103[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_104 = {{uops_0_fp_val}, {uops_14_fp_val}, {uops_13_fp_val}, {uops_12_fp_val}, {uops_11_fp_val}, {uops_10_fp_val}, {uops_9_fp_val}, {uops_8_fp_val}, {uops_7_fp_val}, {uops_6_fp_val}, {uops_5_fp_val}, {uops_4_fp_val}, {uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_val = _GEN_104[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_105 = {{uops_0_fp_rm}, {uops_14_fp_rm}, {uops_13_fp_rm}, {uops_12_fp_rm}, {uops_11_fp_rm}, {uops_10_fp_rm}, {uops_9_fp_rm}, {uops_8_fp_rm}, {uops_7_fp_rm}, {uops_6_fp_rm}, {uops_5_fp_rm}, {uops_4_fp_rm}, {uops_3_fp_rm}, {uops_2_fp_rm}, {uops_1_fp_rm}, {uops_0_fp_rm}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_rm = _GEN_105[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_106 = {{uops_0_fp_typ}, {uops_14_fp_typ}, {uops_13_fp_typ}, {uops_12_fp_typ}, {uops_11_fp_typ}, {uops_10_fp_typ}, {uops_9_fp_typ}, {uops_8_fp_typ}, {uops_7_fp_typ}, {uops_6_fp_typ}, {uops_5_fp_typ}, {uops_4_fp_typ}, {uops_3_fp_typ}, {uops_2_fp_typ}, {uops_1_fp_typ}, {uops_0_fp_typ}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_typ = _GEN_106[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_107 = {{uops_0_xcpt_pf_if}, {uops_14_xcpt_pf_if}, {uops_13_xcpt_pf_if}, {uops_12_xcpt_pf_if}, {uops_11_xcpt_pf_if}, {uops_10_xcpt_pf_if}, {uops_9_xcpt_pf_if}, {uops_8_xcpt_pf_if}, {uops_7_xcpt_pf_if}, {uops_6_xcpt_pf_if}, {uops_5_xcpt_pf_if}, {uops_4_xcpt_pf_if}, {uops_3_xcpt_pf_if}, {uops_2_xcpt_pf_if}, {uops_1_xcpt_pf_if}, {uops_0_xcpt_pf_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_pf_if = _GEN_107[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_108 = {{uops_0_xcpt_ae_if}, {uops_14_xcpt_ae_if}, {uops_13_xcpt_ae_if}, {uops_12_xcpt_ae_if}, {uops_11_xcpt_ae_if}, {uops_10_xcpt_ae_if}, {uops_9_xcpt_ae_if}, {uops_8_xcpt_ae_if}, {uops_7_xcpt_ae_if}, {uops_6_xcpt_ae_if}, {uops_5_xcpt_ae_if}, {uops_4_xcpt_ae_if}, {uops_3_xcpt_ae_if}, {uops_2_xcpt_ae_if}, {uops_1_xcpt_ae_if}, {uops_0_xcpt_ae_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ae_if = _GEN_108[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_109 = {{uops_0_xcpt_ma_if}, {uops_14_xcpt_ma_if}, {uops_13_xcpt_ma_if}, {uops_12_xcpt_ma_if}, {uops_11_xcpt_ma_if}, {uops_10_xcpt_ma_if}, {uops_9_xcpt_ma_if}, {uops_8_xcpt_ma_if}, {uops_7_xcpt_ma_if}, {uops_6_xcpt_ma_if}, {uops_5_xcpt_ma_if}, {uops_4_xcpt_ma_if}, {uops_3_xcpt_ma_if}, {uops_2_xcpt_ma_if}, {uops_1_xcpt_ma_if}, {uops_0_xcpt_ma_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ma_if = _GEN_109[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_110 = {{uops_0_bp_debug_if}, {uops_14_bp_debug_if}, {uops_13_bp_debug_if}, {uops_12_bp_debug_if}, {uops_11_bp_debug_if}, {uops_10_bp_debug_if}, {uops_9_bp_debug_if}, {uops_8_bp_debug_if}, {uops_7_bp_debug_if}, {uops_6_bp_debug_if}, {uops_5_bp_debug_if}, {uops_4_bp_debug_if}, {uops_3_bp_debug_if}, {uops_2_bp_debug_if}, {uops_1_bp_debug_if}, {uops_0_bp_debug_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_debug_if = _GEN_110[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_111 = {{uops_0_bp_xcpt_if}, {uops_14_bp_xcpt_if}, {uops_13_bp_xcpt_if}, {uops_12_bp_xcpt_if}, {uops_11_bp_xcpt_if}, {uops_10_bp_xcpt_if}, {uops_9_bp_xcpt_if}, {uops_8_bp_xcpt_if}, {uops_7_bp_xcpt_if}, {uops_6_bp_xcpt_if}, {uops_5_bp_xcpt_if}, {uops_4_bp_xcpt_if}, {uops_3_bp_xcpt_if}, {uops_2_bp_xcpt_if}, {uops_1_bp_xcpt_if}, {uops_0_bp_xcpt_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_xcpt_if = _GEN_111[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_112 = {{uops_0_debug_fsrc}, {uops_14_debug_fsrc}, {uops_13_debug_fsrc}, {uops_12_debug_fsrc}, {uops_11_debug_fsrc}, {uops_10_debug_fsrc}, {uops_9_debug_fsrc}, {uops_8_debug_fsrc}, {uops_7_debug_fsrc}, {uops_6_debug_fsrc}, {uops_5_debug_fsrc}, {uops_4_debug_fsrc}, {uops_3_debug_fsrc}, {uops_2_debug_fsrc}, {uops_1_debug_fsrc}, {uops_0_debug_fsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_fsrc = _GEN_112[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_113 = {{uops_0_debug_tsrc}, {uops_14_debug_tsrc}, {uops_13_debug_tsrc}, {uops_12_debug_tsrc}, {uops_11_debug_tsrc}, {uops_10_debug_tsrc}, {uops_9_debug_tsrc}, {uops_8_debug_tsrc}, {uops_7_debug_tsrc}, {uops_6_debug_tsrc}, {uops_5_debug_tsrc}, {uops_4_debug_tsrc}, {uops_3_debug_tsrc}, {uops_2_debug_tsrc}, {uops_1_debug_tsrc}, {uops_0_debug_tsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_tsrc = _GEN_113[deq_ptr_value]; // @[Counter.scala:61:40]
wire _io_deq_valid_T = ~io_empty_0; // @[util.scala:458:7, :515:71, :548:32]
assign _io_deq_valid_T_1 = _io_deq_valid_T & _GEN_0; // @[util.scala:515:44, :548:{32,42}]
assign io_deq_valid_0 = _io_deq_valid_T_1; // @[util.scala:458:7, :548:42]
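  // Occupancy computation below: ptr_diff is the wrap-around difference between the enqueue and
  // dequeue pointers; when the pointers match, maybe_full distinguishes a full queue (count 4'hF)
  // from an empty one (count 4'h0), and when the dequeue pointer has wrapped past the enqueue
  // pointer the 15-entry depth is added back to ptr_diff (modulo 16).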
wire [4:0] _ptr_diff_T = _GEN_1 - _GEN_2; // @[Counter.scala:77:24]
wire [3:0] ptr_diff = _ptr_diff_T[3:0]; // @[util.scala:551:34]
wire [3:0] _io_count_T = {4{maybe_full}}; // @[util.scala:509:29, :557:12]
wire _io_count_T_1 = deq_ptr_value > enq_ptr_value; // @[Counter.scala:61:40]
wire [4:0] _io_count_T_2 = {1'h0, ptr_diff} + 5'hF; // @[util.scala:551:34, :560:26]
wire [3:0] _io_count_T_3 = _io_count_T_2[3:0]; // @[util.scala:560:26]
wire [3:0] _io_count_T_4 = _io_count_T_1 ? _io_count_T_3 : ptr_diff; // @[util.scala:551:34, :559:{12,27}, :560:26]
assign _io_count_T_5 = ptr_match ? _io_count_T : _io_count_T_4; // @[util.scala:511:35, :556:22, :557:12, :559:12]
assign io_count_0 = _io_count_T_5; // @[util.scala:458:7, :556:22]
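  // One-hot enqueue decode: _GEN_114 through _GEN_142 flag which of the 15 uop entries captures
  // io_enq_bits on this cycle (entry index compared against enq_ptr_value and gated by do_enq;
  // the last entry is selected by the enqueue counter's wrap signal).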
wire _GEN_114 = enq_ptr_value == 4'h0; // @[Counter.scala:61:40]
wire _GEN_115 = do_enq & _GEN_114; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_116 = enq_ptr_value == 4'h1; // @[Counter.scala:61:40]
wire _GEN_117 = do_enq & _GEN_116; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_118 = enq_ptr_value == 4'h2; // @[Counter.scala:61:40]
wire _GEN_119 = do_enq & _GEN_118; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_120 = enq_ptr_value == 4'h3; // @[Counter.scala:61:40]
wire _GEN_121 = do_enq & _GEN_120; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_122 = enq_ptr_value == 4'h4; // @[Counter.scala:61:40]
wire _GEN_123 = do_enq & _GEN_122; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_124 = enq_ptr_value == 4'h5; // @[Counter.scala:61:40]
wire _GEN_125 = do_enq & _GEN_124; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_126 = enq_ptr_value == 4'h6; // @[Counter.scala:61:40]
wire _GEN_127 = do_enq & _GEN_126; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_128 = enq_ptr_value == 4'h7; // @[Counter.scala:61:40]
wire _GEN_129 = do_enq & _GEN_128; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_130 = enq_ptr_value == 4'h8; // @[Counter.scala:61:40]
wire _GEN_131 = do_enq & _GEN_130; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_132 = enq_ptr_value == 4'h9; // @[Counter.scala:61:40]
wire _GEN_133 = do_enq & _GEN_132; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_134 = enq_ptr_value == 4'hA; // @[Counter.scala:61:40]
wire _GEN_135 = do_enq & _GEN_134; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_136 = enq_ptr_value == 4'hB; // @[Counter.scala:61:40]
wire _GEN_137 = do_enq & _GEN_136; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_138 = enq_ptr_value == 4'hC; // @[Counter.scala:61:40]
wire _GEN_139 = do_enq & _GEN_138; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_140 = enq_ptr_value == 4'hD; // @[Counter.scala:61:40]
wire _GEN_141 = do_enq & _GEN_140; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_142 = do_enq & wrap; // @[Counter.scala:73:24]
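  // Sequential state: the valid bits, the mod-15 enqueue/dequeue counters, the maybe_full flag,
  // and the per-entry microop registers are all updated in the single clocked block below.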
always @(posedge clock) begin // @[util.scala:458:7]
if (reset) begin // @[util.scala:458:7]
valids_0 <= 1'h0; // @[util.scala:504:26]
valids_1 <= 1'h0; // @[util.scala:504:26]
valids_2 <= 1'h0; // @[util.scala:504:26]
valids_3 <= 1'h0; // @[util.scala:504:26]
valids_4 <= 1'h0; // @[util.scala:504:26]
valids_5 <= 1'h0; // @[util.scala:504:26]
valids_6 <= 1'h0; // @[util.scala:504:26]
valids_7 <= 1'h0; // @[util.scala:504:26]
valids_8 <= 1'h0; // @[util.scala:504:26]
valids_9 <= 1'h0; // @[util.scala:504:26]
valids_10 <= 1'h0; // @[util.scala:504:26]
valids_11 <= 1'h0; // @[util.scala:504:26]
valids_12 <= 1'h0; // @[util.scala:504:26]
valids_13 <= 1'h0; // @[util.scala:504:26]
valids_14 <= 1'h0; // @[util.scala:504:26]
enq_ptr_value <= 4'h0; // @[Counter.scala:61:40]
deq_ptr_value <= 4'h0; // @[Counter.scala:61:40]
maybe_full <= 1'h0; // @[util.scala:509:29]
end
else begin // @[util.scala:458:7]
valids_0 <= ~(do_deq & deq_ptr_value == 4'h0) & (_GEN_115 | _valids_0_T_7); // @[Counter.scala:61:40]
valids_1 <= ~(do_deq & deq_ptr_value == 4'h1) & (_GEN_117 | _valids_1_T_7); // @[Counter.scala:61:40]
valids_2 <= ~(do_deq & deq_ptr_value == 4'h2) & (_GEN_119 | _valids_2_T_7); // @[Counter.scala:61:40]
valids_3 <= ~(do_deq & deq_ptr_value == 4'h3) & (_GEN_121 | _valids_3_T_7); // @[Counter.scala:61:40]
valids_4 <= ~(do_deq & deq_ptr_value == 4'h4) & (_GEN_123 | _valids_4_T_7); // @[Counter.scala:61:40]
valids_5 <= ~(do_deq & deq_ptr_value == 4'h5) & (_GEN_125 | _valids_5_T_7); // @[Counter.scala:61:40]
valids_6 <= ~(do_deq & deq_ptr_value == 4'h6) & (_GEN_127 | _valids_6_T_7); // @[Counter.scala:61:40]
valids_7 <= ~(do_deq & deq_ptr_value == 4'h7) & (_GEN_129 | _valids_7_T_7); // @[Counter.scala:61:40]
valids_8 <= ~(do_deq & deq_ptr_value == 4'h8) & (_GEN_131 | _valids_8_T_7); // @[Counter.scala:61:40]
valids_9 <= ~(do_deq & deq_ptr_value == 4'h9) & (_GEN_133 | _valids_9_T_7); // @[Counter.scala:61:40]
valids_10 <= ~(do_deq & deq_ptr_value == 4'hA) & (_GEN_135 | _valids_10_T_7); // @[Counter.scala:61:40]
valids_11 <= ~(do_deq & deq_ptr_value == 4'hB) & (_GEN_137 | _valids_11_T_7); // @[Counter.scala:61:40]
valids_12 <= ~(do_deq & deq_ptr_value == 4'hC) & (_GEN_139 | _valids_12_T_7); // @[Counter.scala:61:40]
valids_13 <= ~(do_deq & deq_ptr_value == 4'hD) & (_GEN_141 | _valids_13_T_7); // @[Counter.scala:61:40]
valids_14 <= ~(do_deq & wrap_1) & (_GEN_142 | _valids_14_T_7); // @[Counter.scala:73:24]
if (do_enq) // @[util.scala:514:26]
enq_ptr_value <= wrap ? 4'h0 : _value_T_1; // @[Counter.scala:61:40, :73:24, :77:{15,24}, :87:{20,28}]
if (do_deq) // @[util.scala:515:26]
deq_ptr_value <= wrap_1 ? 4'h0 : _value_T_3; // @[Counter.scala:61:40, :73:24, :77:{15,24}, :87:{20,28}]
if (~(do_enq == do_deq)) // @[util.scala:509:29, :514:26, :515:26, :539:{18,30}, :540:18]
maybe_full <= do_enq; // @[util.scala:509:29, :514:26]
end
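    // Per-entry microop payload writes (not affected by reset): each entry latches the
    // io_enq_bits_uop_* fields when its enqueue decode fires; br_mask is written from the masked
    // enqueue value on enqueue and is otherwise updated while the entry remains valid.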
if (_GEN_115) begin // @[util.scala:520:18, :526:19, :528:35]
uops_0_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_0_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_0_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_0_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_0_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_0_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_0_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_0_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_0_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_0_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_0_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_0_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_0_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_0_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_0_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_0_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_0_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_0_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_0_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_0_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_0_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_0_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_0_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_0_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_0_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_0_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_0_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_0_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_0_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_0_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_0_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_0_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_0_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_0_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_0_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_0_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_0_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_0_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_0_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_0_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_0_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_114) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_0_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_0) // @[util.scala:504:26]
uops_0_br_mask <= _uops_0_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_117) begin // @[util.scala:520:18, :526:19, :528:35]
uops_1_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_1_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_1_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_1_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_1_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_1_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_1_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_1_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_1_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_1_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_1_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_1_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_1_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_1_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_1_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_1_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_1_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_1_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_1_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_1_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_1_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_1_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_1_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_1_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_1_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_1_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_1_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_1_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_1_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_1_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_1_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_1_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_1_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_1_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_1_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_1_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_1_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_1_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_1_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_1_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_1_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_116) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_1_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_1) // @[util.scala:504:26]
uops_1_br_mask <= _uops_1_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_119) begin // @[util.scala:520:18, :526:19, :528:35]
uops_2_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_2_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_2_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_2_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_2_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_2_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_2_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_2_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_2_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_2_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_2_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_2_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_2_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_2_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_2_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_2_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_2_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_2_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_2_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_2_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_2_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_2_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_2_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_2_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_2_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_2_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_2_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_2_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_2_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_2_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_2_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_2_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_2_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_2_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_2_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_2_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_2_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_2_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_2_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_2_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_2_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_118) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_2_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_2) // @[util.scala:504:26]
uops_2_br_mask <= _uops_2_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_121) begin // @[util.scala:520:18, :526:19, :528:35]
uops_3_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_3_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_3_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_3_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_3_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_3_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_3_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_3_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_3_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_3_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_3_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_3_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_3_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_3_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_3_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_3_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_3_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_3_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_3_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_3_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_3_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_3_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_3_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_3_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_3_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_3_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_3_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_3_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_3_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_3_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_3_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_3_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_3_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_3_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_3_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_3_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_3_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_3_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_3_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_3_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_3_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_120) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_3_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_3) // @[util.scala:504:26]
uops_3_br_mask <= _uops_3_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_123) begin // @[util.scala:520:18, :526:19, :528:35]
uops_4_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_4_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_4_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_4_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_4_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_4_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_4_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_4_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_4_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_4_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_4_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_4_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_4_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_4_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_4_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_4_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_4_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_4_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_4_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_4_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_4_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_4_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_4_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_4_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_4_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_4_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_4_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_4_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_4_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_4_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_4_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_4_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_4_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_4_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_4_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_4_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_4_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_4_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_4_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_4_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_4_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_4_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_4_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_4_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_122) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_4_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_4) // @[util.scala:504:26]
uops_4_br_mask <= _uops_4_br_mask_T_1; // @[util.scala:97:21, :505:22]
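      // NOTE (editorial, interpretation of the generated pattern): every queue slot below
      // repeats the same structure. When the slot's write-enable (_GEN_1xx, presumably the
      // enqueue-pointer match qualified by do_enq) fires, the slot registers capture the
      // incoming micro-op from io_enq_bits_uop_*; the br_mask register is updated
      // separately after each capture block.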
if (_GEN_125) begin // @[util.scala:520:18, :526:19, :528:35]
uops_5_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_5_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_5_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_5_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_5_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_5_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_5_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_5_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_5_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_5_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_5_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_5_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_5_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_5_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_5_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_5_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_5_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_5_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_5_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_5_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_5_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_5_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_5_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_5_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_5_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_5_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_5_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_5_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_5_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_5_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_5_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_5_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_5_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_5_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_5_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_5_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_5_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_5_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_5_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_5_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_5_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_5_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_5_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_5_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
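      // NOTE (interpretation): the branch mask has a two-path update. On an enqueue into
      // this slot it is loaded with the enqueue mask already stripped of branches resolving
      // this cycle (_uops_br_mask_T_1); otherwise, while the slot stays valid, resolved
      // branch bits are cleared from the held mask (_uops_5_br_mask_T_1).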
if (do_enq & _GEN_124) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_5_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_5) // @[util.scala:504:26]
uops_5_br_mask <= _uops_5_br_mask_T_1; // @[util.scala:97:21, :505:22]
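      // ---- queue slot 6 (same capture / br_mask pattern as slot 5) ----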
if (_GEN_127) begin // @[util.scala:520:18, :526:19, :528:35]
uops_6_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_6_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_6_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_6_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_6_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_6_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_6_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_6_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_6_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_6_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_6_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_6_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_6_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_6_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_6_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_6_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_6_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_6_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_6_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_6_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_6_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_6_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_6_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_6_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_6_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_6_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_6_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_6_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_6_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_6_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_6_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_6_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_6_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_6_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_6_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_6_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_6_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_6_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_6_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_6_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_6_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_6_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_6_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_6_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_126) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_6_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_6) // @[util.scala:504:26]
uops_6_br_mask <= _uops_6_br_mask_T_1; // @[util.scala:97:21, :505:22]
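      // ---- queue slot 7 (same capture / br_mask pattern as slot 5) ----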
if (_GEN_129) begin // @[util.scala:520:18, :526:19, :528:35]
uops_7_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_7_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_7_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_7_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_7_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_7_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_7_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_7_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_7_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_7_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_7_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_7_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_7_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_7_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_7_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_7_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_7_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_7_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_7_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_7_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_7_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_7_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_7_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_7_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_7_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_7_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_7_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_7_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_7_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_7_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_7_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_7_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_7_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_7_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_7_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_7_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_7_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_7_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_7_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_7_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_7_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_7_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_7_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_7_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_128) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_7_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_7) // @[util.scala:504:26]
uops_7_br_mask <= _uops_7_br_mask_T_1; // @[util.scala:97:21, :505:22]
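      // ---- queue slot 8 (same capture / br_mask pattern as slot 5) ----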
if (_GEN_131) begin // @[util.scala:520:18, :526:19, :528:35]
uops_8_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_8_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_8_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_8_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_8_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_8_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_8_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_8_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_8_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_8_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_8_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_8_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_8_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_8_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_8_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_8_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_8_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_8_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_8_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_8_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_8_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_8_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_8_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_8_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_8_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_8_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_8_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_8_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_8_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_8_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_8_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_8_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_8_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_8_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_8_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_8_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_8_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_8_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_8_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_8_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_8_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_8_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_8_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_8_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_8_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_8_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_8_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_8_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_8_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_8_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_8_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_8_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_8_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_8_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_8_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_8_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_8_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_8_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_8_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_8_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_8_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_8_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_8_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_8_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_8_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_8_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_8_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_8_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_130) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_8_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_8) // @[util.scala:504:26]
uops_8_br_mask <= _uops_8_br_mask_T_1; // @[util.scala:97:21, :505:22]
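      // ---- queue slot 9 (same capture / br_mask pattern as slot 5) ----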
if (_GEN_133) begin // @[util.scala:520:18, :526:19, :528:35]
uops_9_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_9_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_9_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_9_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_9_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_9_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_9_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_9_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_9_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_9_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_9_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_9_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_9_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_9_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_9_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_9_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_9_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_9_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_9_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_9_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_9_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_9_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_9_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_9_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_9_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_9_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_9_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_9_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_9_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_9_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_9_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_9_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_9_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_9_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_9_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_9_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_9_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_9_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_9_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_9_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_9_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_9_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_9_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_9_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_9_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_9_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_9_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_9_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_9_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_9_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_9_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_9_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_9_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_9_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_9_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_9_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_9_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_9_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_9_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_9_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_9_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_9_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_9_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_9_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_9_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_9_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_9_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_9_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_132) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_9_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_9) // @[util.scala:504:26]
uops_9_br_mask <= _uops_9_br_mask_T_1; // @[util.scala:97:21, :505:22]
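      // ---- queue slot 10 (same capture / br_mask pattern as slot 5) ----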
if (_GEN_135) begin // @[util.scala:520:18, :526:19, :528:35]
uops_10_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_10_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_10_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_10_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_10_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_10_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_10_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_10_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_10_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_10_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_10_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_10_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_10_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_10_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_10_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_10_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_10_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_10_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_10_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_10_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_10_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_10_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_10_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_10_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_10_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_10_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_10_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_10_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_10_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_10_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_10_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_10_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_10_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_10_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_10_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_10_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_10_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_10_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_10_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_10_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_10_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_10_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_10_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_10_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_10_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_10_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_10_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_10_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_10_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_10_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_10_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_10_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_10_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_10_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_10_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_10_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_10_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_10_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_10_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_10_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_10_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_10_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_10_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_10_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_10_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_10_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_10_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_10_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_134) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_10_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_10) // @[util.scala:504:26]
uops_10_br_mask <= _uops_10_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_137) begin // @[util.scala:520:18, :526:19, :528:35]
uops_11_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_11_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_11_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_11_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_11_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_11_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_11_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_11_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_11_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_11_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_11_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_11_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_11_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_11_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_11_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_11_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_11_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_11_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_11_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_11_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_11_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_11_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_11_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_11_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_11_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_11_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_11_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_11_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_11_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_11_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_11_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_11_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_11_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_11_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_11_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_11_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_11_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_11_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_11_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_11_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_11_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_11_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_11_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_11_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_11_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_11_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_11_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_11_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_11_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_11_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_11_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_11_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_11_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_11_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_11_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_11_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_11_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_11_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_11_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_11_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_11_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_11_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_11_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_11_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_11_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_11_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_11_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_11_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_136) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_11_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_11) // @[util.scala:504:26]
uops_11_br_mask <= _uops_11_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_139) begin // @[util.scala:520:18, :526:19, :528:35]
uops_12_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_12_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_12_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_12_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_12_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_12_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_12_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_12_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_12_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_12_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_12_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_12_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_12_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_12_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_12_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_12_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_12_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_12_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_12_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_12_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_12_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_12_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_12_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_12_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_12_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_12_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_12_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_12_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_12_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_12_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_12_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_12_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_12_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_12_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_12_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_12_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_12_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_12_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_12_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_12_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_12_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_12_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_12_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_12_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_12_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_12_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_12_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_12_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_12_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_12_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_12_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_12_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_12_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_12_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_12_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_12_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_12_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_12_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_12_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_12_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_12_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_12_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_12_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_12_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_12_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_12_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_12_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_12_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_138) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_12_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_12) // @[util.scala:504:26]
uops_12_br_mask <= _uops_12_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_141) begin // @[util.scala:520:18, :526:19, :528:35]
uops_13_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_13_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_13_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_13_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_13_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_13_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_13_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_13_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_13_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_13_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_13_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_13_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_13_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_13_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_13_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_13_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_13_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_13_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_13_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_13_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_13_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_13_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_13_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_13_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_13_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_13_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_13_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_13_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_13_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_13_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_13_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_13_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_13_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_13_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_13_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_13_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_13_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_13_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_13_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_13_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_13_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_13_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_13_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_13_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_13_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_13_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_13_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_13_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_13_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_13_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_13_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_13_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_13_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_13_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_13_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_13_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_13_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_13_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_13_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_13_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_13_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_13_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_13_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_13_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_13_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_13_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_13_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_13_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_140) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_13_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_13) // @[util.scala:504:26]
uops_13_br_mask <= _uops_13_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_142) begin // @[util.scala:520:18, :526:19, :528:35]
uops_14_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_14_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_14_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_14_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_14_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_14_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_14_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_14_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_14_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_14_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_14_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_14_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_14_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_14_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_14_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_14_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_14_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_14_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_14_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_14_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_14_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_14_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_14_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_14_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_14_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_14_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_14_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_14_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_14_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_14_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_14_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_14_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_14_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_14_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_14_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_14_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_14_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_14_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_14_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_14_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_14_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_14_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_14_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_14_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_14_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_14_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_14_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_14_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_14_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_14_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_14_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_14_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_14_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_14_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_14_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_14_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_14_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_14_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_14_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_14_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_14_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_14_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_14_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_14_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_14_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_14_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_14_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_14_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & wrap) // @[Counter.scala:73:24]
uops_14_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_14) // @[util.scala:504:26]
uops_14_br_mask <= _uops_14_br_mask_T_1; // @[util.scala:97:21, :505:22]
  end // always @(posedge)
ram_15x131 ram_ext ( // @[util.scala:503:22]
.R0_addr (deq_ptr_value), // @[Counter.scala:61:40]
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_ram_ext_R0_data),
.W0_addr (enq_ptr_value), // @[Counter.scala:61:40]
.W0_en (do_enq), // @[util.scala:514:26]
.W0_clk (clock),
.W0_data ({io_enq_bits_sdq_id_0, io_enq_bits_way_en_0, io_enq_bits_old_meta_tag_0, io_enq_bits_old_meta_coh_state_0, io_enq_bits_tag_match_0, io_enq_bits_is_hella_0, io_enq_bits_data_0, io_enq_bits_addr_0}) // @[util.scala:458:7, :503:22]
); // @[util.scala:503:22]
assign io_enq_ready = io_enq_ready_0; // @[util.scala:458:7]
assign io_deq_valid = io_deq_valid_0; // @[util.scala:458:7]
assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_0 = io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_1 = io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_2 = io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_3 = io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_0 = io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_1 = io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_2 = io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_3 = io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_4 = io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_5 = io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_6 = io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_7 = io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_8 = io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_9 = io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued = io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_agen = io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_dgen = io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_speculative_child = io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_speculative_child = io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_bypass_hint = io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_bypass_hint = io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p3_bypass_hint = io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dis_col_sel = io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_type = io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfb = io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfence = io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_eret = io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rocc = io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_mov = io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_rename = io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_sel = io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pimm = io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op1_sel = io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op2_sel = io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ldst = io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wen = io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren1 = io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren2 = io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren3 = io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap12 = io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap23 = io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagIn = io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagOut = io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fromint = io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_toint = io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fastpipe = io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fma = io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_div = io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_sqrt = io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wflags = io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_vec = io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
assign io_deq_bits_uop_csr_cmd = io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_frs3_en = io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_dw = io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_op = io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_rm = io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_typ = io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
assign io_deq_bits_addr = io_deq_bits_addr_0; // @[util.scala:458:7]
assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:458:7]
assign io_deq_bits_is_hella = io_deq_bits_is_hella_0; // @[util.scala:458:7]
assign io_deq_bits_tag_match = io_deq_bits_tag_match_0; // @[util.scala:458:7]
assign io_deq_bits_old_meta_coh_state = io_deq_bits_old_meta_coh_state_0; // @[util.scala:458:7]
assign io_deq_bits_old_meta_tag = io_deq_bits_old_meta_tag_0; // @[util.scala:458:7]
assign io_deq_bits_way_en = io_deq_bits_way_en_0; // @[util.scala:458:7]
assign io_deq_bits_sdq_id = io_deq_bits_sdq_id_0; // @[util.scala:458:7]
assign io_empty = io_empty_0; // @[util.scala:458:7]
assign io_count = io_count_0; // @[util.scala:458:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
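// ---------------------------------------------------------------------------
// Editor's note: the block below is an illustrative usage sketch and is NOT
// part of the original ShiftReg.scala; the module and signal names are
// hypothetical. It delays io.in by two register stages, both resetting to
// false.B, with the stages suggested the names "delay_0" and "delay_1".
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  io.out := ShiftRegInit(io.in, n = 2, init = false.B, name = Some("delay"))
}
// ---------------------------------------------------------------------------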
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
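// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch, NOT part of the original file;
// names are hypothetical. Pipes an 8-bit value through a 3-deep chain of
// asynchronously reset registers (AsyncResetRegVec instances), initialized
// to zero.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  io.out := AsyncResetShiftReg(io.in, 3, 0, Some("pipe"))
}
// ---------------------------------------------------------------------------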
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
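// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch, NOT part of the original file;
// names are hypothetical. Brings an asynchronous level into the local clock
// domain through a 3-deep, asynchronously reset synchronizer chain; the
// Sync/Inferred/NonSync variants further below are used the same way.
class AsyncLevelSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, 0)
}
// ---------------------------------------------------------------------------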
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
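// Usage sketch (not part of the original file): a hypothetical module that brings an
// asynchronous input into the local clock domain using the helpers above. The module and
// port names are illustrative only; the file's existing chisel3 imports are assumed.
class ExampleInputSynchronizer extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  // Three async-reset flops initialized to 0, sampling the asynchronous input.
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, 0)
}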
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_202( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_362 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
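// Usage sketch (not part of the original file): declaring a single-source client node and
// reaching its negotiated bundle from the module implementation. The class name and
// parameter values are hypothetical; LazyModule/LazyModuleImp and IdRange additionally
// require the usual org.chipsalliance.diplomacy.lazymodule and freechips.rocketchip.diplomacy imports.
class ExampleTLClient(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    clients = Seq(TLMasterParameters.v1(name = "example", sourceId = IdRange(0, 1))))))
  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)
    tl.a.valid := false.B  // this sketch issues no requests
    tl.a.bits  := DontCare
    tl.d.ready := true.B
    tl.b.ready := true.B   // tie off channels a TL-UL client never uses
    tl.c.valid := false.B
    tl.e.valid := false.B
  }
}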
File RegisterRouter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.resources.{Device, Resource, ResourceBindings}
import freechips.rocketchip.prci.{NoCrossing}
import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno}
import scala.math.min
class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle {
val source = UInt((sourceBits max 1).W)
val size = UInt((sizeBits max 1).W)
}
case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra")
case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers.
  * It provides functionality for describing and outputting metadata about the registers in several formats.
* It also provides a concrete implementation of a regmap function that will be used
* to wire a map of internal registers associated with this node to the node's interconnect port.
*/
case class TLRegisterNode(
address: Seq[AddressSet],
device: Device,
deviceKey: String = "reg/control",
concurrency: Int = 0,
beatBytes: Int = 4,
undefZero: Boolean = true,
executable: Boolean = false)(
implicit valName: ValName)
extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = Seq(Resource(device, deviceKey)),
executable = executable,
supportsGet = TransferSizes(1, beatBytes),
supportsPutPartial = TransferSizes(1, beatBytes),
supportsPutFull = TransferSizes(1, beatBytes),
fifoId = Some(0))), // requests are handled in order
beatBytes = beatBytes,
minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle
{
val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min)
require (size >= beatBytes)
address.foreach { case a =>
require (a.widen(size-1).base == address.head.widen(size-1).base,
s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}")
}
// Calling this method causes the matching TL2 bundle to be
// configured to route all requests to the listed RegFields.
def regmap(mapping: RegField.Map*) = {
val (bundleIn, edge) = this.in(0)
val a = bundleIn.a
val d = bundleIn.d
val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields
val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields)
val in = Wire(Decoupled(new RegMapperInput(params)))
in.bits.read := a.bits.opcode === TLMessages.Get
in.bits.index := edge.addr_hi(a.bits)
in.bits.data := a.bits.data
in.bits.mask := a.bits.mask
Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = in.bits.extra(TLRegisterRouterExtra)
a_extra.source := a.bits.source
a_extra.size := a.bits.size
// Invoke the register map builder
val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*)
// No flow control needed
in.valid := a.valid
a.ready := in.ready
d.valid := out.valid
out.ready := d.ready
// We must restore the size to enable width adapters to work
val d_extra = out.bits.extra(TLRegisterRouterExtra)
d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size)
// avoid a Mux on the data bus by manually overriding two fields
d.bits.data := out.bits.data
Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match {
case (lhs, rhs) => lhs :<= rhs
}
d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck)
// Tie off unused channels
bundleIn.b.valid := false.B
bundleIn.c.ready := true.B
bundleIn.e.ready := true.B
genRegDescsJson(mapping:_*)
}
def genRegDescsJson(mapping: RegField.Map*): Unit = {
// Dump out the register map for documentation purposes.
val base = address.head.base
val baseHex = s"0x${base.toInt.toHexString}"
val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}"
val json = GenRegDescsAnno.serialize(base, name, mapping:_*)
var suffix = 0
while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) {
suffix = suffix + 1
}
ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json)
val module = Module.currentModule.get.asInstanceOf[RawModule]
GenRegDescsAnno.anno(
module,
base,
mapping:_*)
}
}
/** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink.
* - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers.
* - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect.
* - Use the mapping helper function regmap to internally fill out the space of device control registers.
*/
trait HasTLControlRegMap { this: RegisterRouter =>
protected val controlNode = TLRegisterNode(
address = address,
device = device,
deviceKey = "reg/control",
concurrency = concurrency,
beatBytes = beatBytes,
undefZero = undefZero,
executable = executable)
// Externally, this helper should be used to connect the register control port to a bus
val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode)
// Backwards-compatibility default node accessor with no clock crossing
lazy val node: TLInwardNode = controlXing(NoCrossing)
// Internally, this function should be used to populate the control port with registers
protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) }
}
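// Usage sketch (not part of the original file): the regmap flow on a bare TLRegisterNode,
// exposing one 32-bit read/write scratch register at offset 0x0. The class name, base
// address and device strings are hypothetical; SimpleDevice, LazyModule and LazyModuleImp
// come from the usual resources/lazymodule imports, and the node must be attached to a
// TileLink interconnect before elaboration.
class ExampleRegBlock(beatBytes: Int)(implicit p: Parameters) extends LazyModule {
  val device = new SimpleDevice("example-regblock", Seq("example,regblock0"))
  val node = TLRegisterNode(
    address   = Seq(AddressSet(0x4000, 0xfff)),
    device    = device,
    beatBytes = beatBytes)
  lazy val module = new LazyModuleImp(this) {
    val scratch = RegInit(0.U(32.W))
    node.regmap(
      0x00 -> Seq(RegField(32, scratch)))  // read/write scratch register
  }
}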
File MuxLiteral.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.log2Ceil
import scala.reflect.ClassTag
/* MuxLiteral creates a lookup table from a key to a list of values.
* Unlike MuxLookup, the table keys must be exclusive literals.
*/
object MuxLiteral
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (UInt, T), rest: (UInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(UInt, T)]): T =
MuxTable(index, default, cases.map { case (k, v) => (k.litValue, v) })
}
object MuxSeq
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: T, rest: T*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[T]): T =
MuxTable(index, default, cases.zipWithIndex.map { case (v, i) => (BigInt(i), v) })
}
object MuxTable
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (BigInt, T), rest: (BigInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(BigInt, T)]): T = {
/* All keys must be >= 0 and distinct */
cases.foreach { case (k, _) => require (k >= 0) }
require (cases.map(_._1).distinct.size == cases.size)
/* Filter out any cases identical to the default */
val simple = cases.filter { case (k, v) => !default.isLit || !v.isLit || v.litValue != default.litValue }
val maxKey = (BigInt(0) +: simple.map(_._1)).max
val endIndex = BigInt(1) << log2Ceil(maxKey+1)
if (simple.isEmpty) {
default
} else if (endIndex <= 2*simple.size) {
/* The dense encoding case uses a Vec */
val table = Array.fill(endIndex.toInt) { default }
simple.foreach { case (k, v) => table(k.toInt) = v }
Mux(index >= endIndex.U, default, VecInit(table)(index))
} else {
/* The sparse encoding case uses switch */
val out = WireDefault(default)
simple.foldLeft(new chisel3.util.SwitchContext(index, None, Set.empty)) { case (acc, (k, v)) =>
acc.is (k.U) { out := v }
}
out
}
}
}
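// Usage sketch (not part of the original file): MuxLiteral with exclusive literal keys,
// falling through to the default for any other select value. The module and values are
// purely illustrative.
class MuxLiteralExample extends Module {
  val io = IO(new Bundle {
    val sel = Input(UInt(3.W))
    val out = Output(UInt(8.W))
  })
  io.out := MuxLiteral(io.sel, 0.U(8.W),
    1.U -> 0x11.U(8.W),
    4.U -> 0x44.U(8.W))
}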
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], return [[AutoBundle]] and unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
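// Usage sketch (not part of the original file): the standard pairing of a LazyModule wrapper
// with a LazyModuleImp body. Names are hypothetical; elaboration must go through
// LazyModule(new ExampleLeaf) before .module is accessed.
class ExampleLeaf(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyModuleImp(this) {
    val io = IO(new Bundle { val out = Output(UInt(8.W)) })
    io.out := 42.U  // trivial body; real designs connect node bundles here
  }
}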
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
  *   whether to flip in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
  *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
  *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
  *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
  *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  *   - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
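// Usage sketch (not part of the original file): from inside a LazyModuleImp, the negotiated
// bundles and edges of a node are reached through in/out once instantiation has begun.
// The node val and the pass-through connection below are hypothetical.
//
//   lazy val module = new LazyModuleImp(this) {
//     val (bundleIn, edgeIn)   = node.in(0)   // BI hardware and EI parameters of the first inward binding
//     val (bundleOut, edgeOut) = node.out(0)  // BO hardware and EO parameters of the first outward binding
//     bundleOut <> bundleIn                   // simple identity-style pass-through
//   }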
File Control.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.tilelink._
class InclusiveCacheControl(outer: InclusiveCache, control: InclusiveCacheControlParameters)(implicit p: Parameters) extends LazyModule()(p) {
val ctrlnode = TLRegisterNode(
address = Seq(AddressSet(control.address, InclusiveCacheParameters.L2ControlSize-1)),
device = outer.device,
concurrency = 1, // Only one flush at a time (else need to track who answers)
beatBytes = control.beatBytes)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val flush_match = Input(Bool())
val flush_req = Decoupled(UInt(64.W))
val flush_resp = Input(Bool())
})
// Flush directive
val flushInValid = RegInit(false.B)
val flushInAddress = Reg(UInt(64.W))
val flushOutValid = RegInit(false.B)
val flushOutReady = WireInit(init = false.B)
when (flushOutReady) { flushOutValid := false.B }
when (io.flush_resp) { flushOutValid := true.B }
when (io.flush_req.ready) { flushInValid := false.B }
io.flush_req.valid := flushInValid
io.flush_req.bits := flushInAddress
when (!io.flush_match && flushInValid) {
flushInValid := false.B
flushOutValid := true.B
}
val flush32 = RegField.w(32, RegWriteFn((ivalid, oready, data) => {
when (oready) { flushOutReady := true.B }
when (ivalid) { flushInValid := true.B }
when (ivalid && !flushInValid) { flushInAddress := data << 4 }
(!flushInValid, flushOutValid)
}), RegFieldDesc("Flush32", "Flush the physical address equal to the 32-bit written data << 4 from the cache"))
val flush64 = RegField.w(64, RegWriteFn((ivalid, oready, data) => {
when (oready) { flushOutReady := true.B }
when (ivalid) { flushInValid := true.B }
when (ivalid && !flushInValid) { flushInAddress := data }
(!flushInValid, flushOutValid)
}), RegFieldDesc("Flush64", "Flush the phsyical address equal to the 64-bit written data from the cache"))
// Information about the cache configuration
val banksR = RegField.r(8, outer.node.edges.in.size.U, RegFieldDesc("Banks",
"Number of banks in the cache", reset=Some(outer.node.edges.in.size)))
val waysR = RegField.r(8, outer.cache.ways.U, RegFieldDesc("Ways",
"Number of ways per bank", reset=Some(outer.cache.ways)))
val lgSetsR = RegField.r(8, log2Ceil(outer.cache.sets).U, RegFieldDesc("lgSets",
"Base-2 logarithm of the sets per bank", reset=Some(log2Ceil(outer.cache.sets))))
val lgBlockBytesR = RegField.r(8, log2Ceil(outer.cache.blockBytes).U, RegFieldDesc("lgBlockBytes",
"Base-2 logarithm of the bytes per cache block", reset=Some(log2Ceil(outer.cache.blockBytes))))
val regmap = ctrlnode.regmap(
0x000 -> RegFieldGroup("Config", Some("Information about the Cache Configuration"), Seq(banksR, waysR, lgSetsR, lgBlockBytesR)),
0x200 -> (if (control.beatBytes >= 8) Seq(flush64) else Nil),
0x240 -> Seq(flush32)
)
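// Worked example for the read-only Config block at 0x000: a bank with 8 ways,
// 2048 sets and 64-byte blocks behind a single TL edge reads back Banks=1,
// Ways=8, lgSets=11 and lgBlockBytes=6, so software recovers the geometry as
// sets = 1 << lgSets and blockBytes = 1 << lgBlockBytes.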
}
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
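// Illustrative check: for lgSize = 3 the mask is 0b111, so an 8-byte access at
// address 0x48 is aligned while one at 0x4A is not; when maxLgSize == 0 every
// access is a single byte and alignment is trivially true.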
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
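// Worked example: a 64-byte burst on an 8-byte-wide manager gives beats1 = 7;
// `first` is high on beat 0, the counter then counts 7,6,...,1, `last`/`done`
// fire on the eighth beat, and `count` steps through 0..7 as the beat index.
// Single-beat messages (beats1 == 0) assert first, last and done together.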
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
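// In words: an A request needs T (write) permission for every Put/AMO, for a
// PREFETCH_WRITE hint, and for acquires that grow to Tip (NtoT/BtoT); Gets,
// PREFETCH_READ hints and NtoB acquires are satisfied by a Branch copy. Other
// opcode/param combinations fall through to DontCare.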
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
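// The counter above increments on the first beat of each request and
// decrements on the last beat of each response; B/C/E traffic is only tracked
// when both sides support Acquire/Probe (bce). Its width of
// log2Ceil(3*client.endSourceId+1) bits is a conservative bound on how many
// transactions can be outstanding across all source ids at once.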
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
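// Hedged usage sketch (illustrative only, not part of this file): a client
// typically builds channel beats with these edge helpers rather than filling
// TLBundle fields by hand. Assuming `tl` and `edge` came from `node.out.head`
// in some LazyModuleImp:
//
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   tl.a.valid := wantRead && legal
//   tl.a.bits  := get
//   // on the way back, edge.hasData(tl.d.bits) and edge.last(tl.d) track the burst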
| module InclusiveCacheControl( // @[Control.scala:38:9]
input clock, // @[Control.scala:38:9]
input reset, // @[Control.scala:38:9]
output auto_ctrl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_ctrl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_ctrl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [25:0] auto_ctrl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_ctrl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_ctrl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_ctrl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_ctrl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_ctrl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_ctrl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_ctrl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input io_flush_match, // @[Control.scala:39:16]
input io_flush_req_ready, // @[Control.scala:39:16]
output io_flush_req_valid, // @[Control.scala:39:16]
output [63:0] io_flush_req_bits, // @[Control.scala:39:16]
input io_flush_resp // @[Control.scala:39:16]
);
wire out_bits_read; // @[RegisterRouter.scala:87:24]
wire [10:0] out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [8:0] in_bits_index; // @[RegisterRouter.scala:73:18]
wire in_bits_read; // @[RegisterRouter.scala:73:18]
wire _out_back_front_q_io_deq_valid; // @[RegisterRouter.scala:87:24]
wire _out_back_front_q_io_deq_bits_read; // @[RegisterRouter.scala:87:24]
wire [8:0] _out_back_front_q_io_deq_bits_index; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_back_front_q_io_deq_bits_data; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_back_front_q_io_deq_bits_mask; // @[RegisterRouter.scala:87:24]
wire auto_ctrl_in_a_valid_0 = auto_ctrl_in_a_valid; // @[Control.scala:38:9]
wire [2:0] auto_ctrl_in_a_bits_opcode_0 = auto_ctrl_in_a_bits_opcode; // @[Control.scala:38:9]
wire [2:0] auto_ctrl_in_a_bits_param_0 = auto_ctrl_in_a_bits_param; // @[Control.scala:38:9]
wire [1:0] auto_ctrl_in_a_bits_size_0 = auto_ctrl_in_a_bits_size; // @[Control.scala:38:9]
wire [10:0] auto_ctrl_in_a_bits_source_0 = auto_ctrl_in_a_bits_source; // @[Control.scala:38:9]
wire [25:0] auto_ctrl_in_a_bits_address_0 = auto_ctrl_in_a_bits_address; // @[Control.scala:38:9]
wire [7:0] auto_ctrl_in_a_bits_mask_0 = auto_ctrl_in_a_bits_mask; // @[Control.scala:38:9]
wire [63:0] auto_ctrl_in_a_bits_data_0 = auto_ctrl_in_a_bits_data; // @[Control.scala:38:9]
wire auto_ctrl_in_a_bits_corrupt_0 = auto_ctrl_in_a_bits_corrupt; // @[Control.scala:38:9]
wire auto_ctrl_in_d_ready_0 = auto_ctrl_in_d_ready; // @[Control.scala:38:9]
wire io_flush_match_0 = io_flush_match; // @[Control.scala:38:9]
wire io_flush_req_ready_0 = io_flush_req_ready; // @[Control.scala:38:9]
wire io_flush_resp_0 = io_flush_resp; // @[Control.scala:38:9]
wire [3:0][63:0] _GEN = '{64'h0, 64'h0, 64'h0, 64'h60A0801};
wire [8:0] out_maskMatch = 9'h1B7; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_13 = 8'h1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_14 = 8'h1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_prepend_T = 8'h1; // @[RegisterRouter.scala:87:24]
wire [11:0] out_prepend = 12'h801; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_T_22 = 16'h801; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_T_23 = 16'h801; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_prepend_T_1 = 16'h801; // @[RegisterRouter.scala:87:24]
wire [19:0] out_prepend_1 = 20'hA0801; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_T_31 = 24'hA0801; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_T_32 = 24'hA0801; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_prepend_T_2 = 24'hA0801; // @[RegisterRouter.scala:87:24]
wire [26:0] out_prepend_2 = 27'h60A0801; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_40 = 32'h60A0801; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_41 = 32'h60A0801; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_66 = 32'h0; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_67 = 32'h0; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_WIRE_1_0 = 64'h60A0801; // @[MuxLiteral.scala:49:48]
wire [2:0] ctrlnodeIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [63:0] _out_T_53 = 64'h0; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_T_54 = 64'h0; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_WIRE_1_1 = 64'h0; // @[MuxLiteral.scala:49:48]
wire [63:0] _out_out_bits_data_WIRE_1_2 = 64'h0; // @[MuxLiteral.scala:49:48]
wire [63:0] _out_out_bits_data_WIRE_1_3 = 64'h0; // @[MuxLiteral.scala:49:48]
wire [63:0] ctrlnodeIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire auto_ctrl_in_d_bits_sink = 1'h0; // @[Control.scala:38:9]
wire auto_ctrl_in_d_bits_denied = 1'h0; // @[Control.scala:38:9]
wire auto_ctrl_in_d_bits_corrupt = 1'h0; // @[Control.scala:38:9]
wire ctrlnodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire ctrlnodeIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire ctrlnodeIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire _out_rifireMux_T_8 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_18 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wifireMux_T_9 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_19 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_rofireMux_T_8 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_18 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wofireMux_T_9 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_19 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T_2 = 1'h0; // @[MuxLiteral.scala:49:17]
wire ctrlnodeIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire ctrlnodeIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire ctrlnodeIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire [1:0] auto_ctrl_in_d_bits_param = 2'h0; // @[Control.scala:38:9]
wire [1:0] ctrlnodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] ctrlnodeIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire ctrlnodeIn_a_ready; // @[MixedNode.scala:551:17]
wire out_rifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_9 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_2 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_13 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_3 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_17 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_2 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_10 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wifireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_9 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_2 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_13 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_3 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_17 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_2 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_10 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wofireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_out_bits_data_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire ctrlnodeIn_a_valid = auto_ctrl_in_a_valid_0; // @[Control.scala:38:9]
wire [2:0] ctrlnodeIn_a_bits_opcode = auto_ctrl_in_a_bits_opcode_0; // @[Control.scala:38:9]
wire [2:0] ctrlnodeIn_a_bits_param = auto_ctrl_in_a_bits_param_0; // @[Control.scala:38:9]
wire [1:0] ctrlnodeIn_a_bits_size = auto_ctrl_in_a_bits_size_0; // @[Control.scala:38:9]
wire [10:0] ctrlnodeIn_a_bits_source = auto_ctrl_in_a_bits_source_0; // @[Control.scala:38:9]
wire [25:0] ctrlnodeIn_a_bits_address = auto_ctrl_in_a_bits_address_0; // @[Control.scala:38:9]
wire [7:0] ctrlnodeIn_a_bits_mask = auto_ctrl_in_a_bits_mask_0; // @[Control.scala:38:9]
wire [63:0] ctrlnodeIn_a_bits_data = auto_ctrl_in_a_bits_data_0; // @[Control.scala:38:9]
wire ctrlnodeIn_a_bits_corrupt = auto_ctrl_in_a_bits_corrupt_0; // @[Control.scala:38:9]
wire ctrlnodeIn_d_ready = auto_ctrl_in_d_ready_0; // @[Control.scala:38:9]
wire ctrlnodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] ctrlnodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] ctrlnodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [10:0] ctrlnodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] ctrlnodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire auto_ctrl_in_a_ready_0; // @[Control.scala:38:9]
wire [2:0] auto_ctrl_in_d_bits_opcode_0; // @[Control.scala:38:9]
wire [1:0] auto_ctrl_in_d_bits_size_0; // @[Control.scala:38:9]
wire [10:0] auto_ctrl_in_d_bits_source_0; // @[Control.scala:38:9]
wire [63:0] auto_ctrl_in_d_bits_data_0; // @[Control.scala:38:9]
wire auto_ctrl_in_d_valid_0; // @[Control.scala:38:9]
wire io_flush_req_valid_0; // @[Control.scala:38:9]
wire [63:0] io_flush_req_bits_0; // @[Control.scala:38:9]
wire in_ready; // @[RegisterRouter.scala:73:18]
assign auto_ctrl_in_a_ready_0 = ctrlnodeIn_a_ready; // @[Control.scala:38:9]
wire in_valid = ctrlnodeIn_a_valid; // @[RegisterRouter.scala:73:18]
wire [1:0] in_bits_extra_tlrr_extra_size = ctrlnodeIn_a_bits_size; // @[RegisterRouter.scala:73:18]
wire [10:0] in_bits_extra_tlrr_extra_source = ctrlnodeIn_a_bits_source; // @[RegisterRouter.scala:73:18]
wire [7:0] in_bits_mask = ctrlnodeIn_a_bits_mask; // @[RegisterRouter.scala:73:18]
wire [63:0] in_bits_data = ctrlnodeIn_a_bits_data; // @[RegisterRouter.scala:73:18]
wire out_ready = ctrlnodeIn_d_ready; // @[RegisterRouter.scala:87:24]
wire out_valid; // @[RegisterRouter.scala:87:24]
assign auto_ctrl_in_d_valid_0 = ctrlnodeIn_d_valid; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_opcode_0 = ctrlnodeIn_d_bits_opcode; // @[Control.scala:38:9]
wire [1:0] ctrlnodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign auto_ctrl_in_d_bits_size_0 = ctrlnodeIn_d_bits_size; // @[Control.scala:38:9]
wire [10:0] ctrlnodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign auto_ctrl_in_d_bits_source_0 = ctrlnodeIn_d_bits_source; // @[Control.scala:38:9]
wire [63:0] out_bits_data; // @[RegisterRouter.scala:87:24]
assign auto_ctrl_in_d_bits_data_0 = ctrlnodeIn_d_bits_data; // @[Control.scala:38:9]
reg flushInValid; // @[Control.scala:45:33]
assign io_flush_req_valid_0 = flushInValid; // @[Control.scala:38:9, :45:33]
reg [63:0] flushInAddress; // @[Control.scala:46:29]
assign io_flush_req_bits_0 = flushInAddress; // @[Control.scala:38:9, :46:29]
reg flushOutValid; // @[Control.scala:47:33]
wire flushOutReady; // @[Control.scala:48:34]
wire _out_in_ready_T; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_a_ready = in_ready; // @[RegisterRouter.scala:73:18]
wire _in_bits_read_T; // @[RegisterRouter.scala:74:36]
wire out_front_bits_read = in_bits_read; // @[RegisterRouter.scala:73:18, :87:24]
wire [8:0] out_front_bits_index = in_bits_index; // @[RegisterRouter.scala:73:18, :87:24]
wire [63:0] out_front_bits_data = in_bits_data; // @[RegisterRouter.scala:73:18, :87:24]
wire [7:0] out_front_bits_mask = in_bits_mask; // @[RegisterRouter.scala:73:18, :87:24]
wire [10:0] out_front_bits_extra_tlrr_extra_source = in_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:73:18, :87:24]
wire [1:0] out_front_bits_extra_tlrr_extra_size = in_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:73:18, :87:24]
assign _in_bits_read_T = ctrlnodeIn_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36]
assign in_bits_read = _in_bits_read_T; // @[RegisterRouter.scala:73:18, :74:36]
wire [22:0] _in_bits_index_T = ctrlnodeIn_a_bits_address[25:3]; // @[Edges.scala:192:34]
assign in_bits_index = _in_bits_index_T[8:0]; // @[RegisterRouter.scala:73:18, :75:19]
wire _out_out_valid_T; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_d_valid = out_valid; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_T_4; // @[RegisterRouter.scala:87:24]
wire _ctrlnodeIn_d_bits_opcode_T = out_bits_read; // @[RegisterRouter.scala:87:24, :105:25]
assign ctrlnodeIn_d_bits_data = out_bits_data; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_d_bits_d_source = out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [1:0] out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_d_bits_d_size = out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
wire _out_front_valid_T; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_T_42 = out_front_bits_data; // @[RegisterRouter.scala:87:24]
wire out_front_ready; // @[RegisterRouter.scala:87:24]
wire out_front_valid; // @[RegisterRouter.scala:87:24]
wire [8:0] out_findex = out_front_bits_index & 9'h1B7; // @[RegisterRouter.scala:87:24]
wire [8:0] out_bindex = _out_back_front_q_io_deq_bits_index & 9'h1B7; // @[RegisterRouter.scala:87:24]
wire _GEN_0 = out_findex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T; // @[RegisterRouter.scala:87:24]
assign _out_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_T_2; // @[RegisterRouter.scala:87:24]
assign _out_T_2 = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_T_4; // @[RegisterRouter.scala:87:24]
assign _out_T_4 = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _GEN_1 = out_bindex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_1; // @[RegisterRouter.scala:87:24]
assign _out_T_1 = _GEN_1; // @[RegisterRouter.scala:87:24]
wire _out_T_3; // @[RegisterRouter.scala:87:24]
assign _out_T_3 = _GEN_1; // @[RegisterRouter.scala:87:24]
wire _out_T_5; // @[RegisterRouter.scala:87:24]
assign _out_T_5 = _GEN_1; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_0 = _out_T_1; // @[MuxLiteral.scala:49:48]
wire _out_out_bits_data_WIRE_2 = _out_T_3; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_3 = _out_T_5; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_15; // @[RegisterRouter.scala:87:24]
wire out_rivalid_0; // @[RegisterRouter.scala:87:24]
wire out_rivalid_1; // @[RegisterRouter.scala:87:24]
wire out_rivalid_2; // @[RegisterRouter.scala:87:24]
wire out_rivalid_3; // @[RegisterRouter.scala:87:24]
wire out_rivalid_4; // @[RegisterRouter.scala:87:24]
wire out_rivalid_5; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_16; // @[RegisterRouter.scala:87:24]
wire out_wivalid_0; // @[RegisterRouter.scala:87:24]
wire out_wivalid_1; // @[RegisterRouter.scala:87:24]
wire out_wivalid_2; // @[RegisterRouter.scala:87:24]
wire out_wivalid_3; // @[RegisterRouter.scala:87:24]
wire out_wivalid_4; // @[RegisterRouter.scala:87:24]
wire out_wivalid_5; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_15; // @[RegisterRouter.scala:87:24]
wire out_roready_0; // @[RegisterRouter.scala:87:24]
wire out_roready_1; // @[RegisterRouter.scala:87:24]
wire out_roready_2; // @[RegisterRouter.scala:87:24]
wire out_roready_3; // @[RegisterRouter.scala:87:24]
wire out_roready_4; // @[RegisterRouter.scala:87:24]
wire out_roready_5; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_16; // @[RegisterRouter.scala:87:24]
wire out_woready_0; // @[RegisterRouter.scala:87:24]
wire out_woready_1; // @[RegisterRouter.scala:87:24]
wire out_woready_2; // @[RegisterRouter.scala:87:24]
wire out_woready_3; // @[RegisterRouter.scala:87:24]
wire out_woready_4; // @[RegisterRouter.scala:87:24]
wire out_woready_5; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_8 = {8{_out_frontMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_9 = {8{_out_frontMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_10 = {8{_out_frontMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_11 = {8{_out_frontMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_12 = {8{_out_frontMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_13 = {8{_out_frontMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_14 = {8{_out_frontMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_15 = {8{_out_frontMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_lo = {_out_frontMask_T_9, _out_frontMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_hi = {_out_frontMask_T_11, _out_frontMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_lo = {out_frontMask_lo_hi, out_frontMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_lo = {_out_frontMask_T_13, _out_frontMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_hi = {_out_frontMask_T_15, _out_frontMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_hi = {out_frontMask_hi_hi, out_frontMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_frontMask = {out_frontMask_hi, out_frontMask_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_rimask_T_4 = out_frontMask; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_wimask_T_4 = out_frontMask; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T = _out_back_front_q_io_deq_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_1 = _out_back_front_q_io_deq_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_2 = _out_back_front_q_io_deq_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_3 = _out_back_front_q_io_deq_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_4 = _out_back_front_q_io_deq_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_5 = _out_back_front_q_io_deq_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_6 = _out_back_front_q_io_deq_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_7 = _out_back_front_q_io_deq_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_8 = {8{_out_backMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_9 = {8{_out_backMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_10 = {8{_out_backMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_11 = {8{_out_backMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_12 = {8{_out_backMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_13 = {8{_out_backMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_14 = {8{_out_backMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_15 = {8{_out_backMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_lo = {_out_backMask_T_9, _out_backMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_hi = {_out_backMask_T_11, _out_backMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_lo = {out_backMask_lo_hi, out_backMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_lo = {_out_backMask_T_13, _out_backMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_hi = {_out_backMask_T_15, _out_backMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_hi = {out_backMask_hi_hi, out_backMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_backMask = {out_backMask_hi, out_backMask_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_romask_T_4 = out_backMask; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_womask_T_4 = out_backMask; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T = out_frontMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T = out_frontMask[7:0]; // @[RegisterRouter.scala:87:24]
wire out_rimask = |_out_rimask_T; // @[RegisterRouter.scala:87:24]
wire out_wimask = &_out_wimask_T; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T = out_backMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T = out_backMask[7:0]; // @[RegisterRouter.scala:87:24]
wire out_romask = |_out_romask_T; // @[RegisterRouter.scala:87:24]
wire out_womask = &_out_womask_T; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid = out_rivalid_0 & out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_7 = out_f_rivalid; // @[RegisterRouter.scala:87:24]
wire out_f_roready = out_roready_0 & out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_8 = out_f_roready; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid = out_wivalid_0 & out_wimask; // @[RegisterRouter.scala:87:24]
wire out_f_woready = out_woready_0 & out_womask; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_6 = _out_back_front_q_io_deq_bits_data[7:0]; // @[RegisterRouter.scala:87:24]
wire _out_T_9 = ~out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_10 = ~out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_11 = ~out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_12 = ~out_womask; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_1 = out_frontMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_1 = out_frontMask[15:8]; // @[RegisterRouter.scala:87:24]
wire out_rimask_1 = |_out_rimask_T_1; // @[RegisterRouter.scala:87:24]
wire out_wimask_1 = &_out_wimask_T_1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_1 = out_backMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_1 = out_backMask[15:8]; // @[RegisterRouter.scala:87:24]
wire out_romask_1 = |_out_romask_T_1; // @[RegisterRouter.scala:87:24]
wire out_womask_1 = &_out_womask_T_1; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_1 = out_rivalid_1 & out_rimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_16 = out_f_rivalid_1; // @[RegisterRouter.scala:87:24]
wire out_f_roready_1 = out_roready_1 & out_romask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_17 = out_f_roready_1; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_1 = out_wivalid_1 & out_wimask_1; // @[RegisterRouter.scala:87:24]
wire out_f_woready_1 = out_woready_1 & out_womask_1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_15 = _out_back_front_q_io_deq_bits_data[15:8]; // @[RegisterRouter.scala:87:24]
wire _out_T_18 = ~out_rimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_19 = ~out_wimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_20 = ~out_romask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_21 = ~out_womask_1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_2 = out_frontMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_2 = out_frontMask[23:16]; // @[RegisterRouter.scala:87:24]
wire out_rimask_2 = |_out_rimask_T_2; // @[RegisterRouter.scala:87:24]
wire out_wimask_2 = &_out_wimask_T_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_2 = out_backMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_2 = out_backMask[23:16]; // @[RegisterRouter.scala:87:24]
wire out_romask_2 = |_out_romask_T_2; // @[RegisterRouter.scala:87:24]
wire out_womask_2 = &_out_womask_T_2; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_2 = out_rivalid_2 & out_rimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_25 = out_f_rivalid_2; // @[RegisterRouter.scala:87:24]
wire out_f_roready_2 = out_roready_2 & out_romask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_26 = out_f_roready_2; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_2 = out_wivalid_2 & out_wimask_2; // @[RegisterRouter.scala:87:24]
wire out_f_woready_2 = out_woready_2 & out_womask_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_24 = _out_back_front_q_io_deq_bits_data[23:16]; // @[RegisterRouter.scala:87:24]
wire _out_T_27 = ~out_rimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_28 = ~out_wimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_29 = ~out_romask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_30 = ~out_womask_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_3 = out_frontMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_3 = out_frontMask[31:24]; // @[RegisterRouter.scala:87:24]
wire out_rimask_3 = |_out_rimask_T_3; // @[RegisterRouter.scala:87:24]
wire out_wimask_3 = &_out_wimask_T_3; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_3 = out_backMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_3 = out_backMask[31:24]; // @[RegisterRouter.scala:87:24]
wire out_romask_3 = |_out_romask_T_3; // @[RegisterRouter.scala:87:24]
wire out_womask_3 = &_out_womask_T_3; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_3 = out_rivalid_3 & out_rimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_34 = out_f_rivalid_3; // @[RegisterRouter.scala:87:24]
wire out_f_roready_3 = out_roready_3 & out_romask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_35 = out_f_roready_3; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_3 = out_wivalid_3 & out_wimask_3; // @[RegisterRouter.scala:87:24]
wire out_f_woready_3 = out_woready_3 & out_womask_3; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_33 = _out_back_front_q_io_deq_bits_data[31:24]; // @[RegisterRouter.scala:87:24]
wire _out_T_36 = ~out_rimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_37 = ~out_wimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_38 = ~out_romask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_39 = ~out_womask_3; // @[RegisterRouter.scala:87:24]
wire out_rimask_4 = |_out_rimask_T_4; // @[RegisterRouter.scala:87:24]
wire out_wimask_4 = &_out_wimask_T_4; // @[RegisterRouter.scala:87:24]
wire out_romask_4 = |_out_romask_T_4; // @[RegisterRouter.scala:87:24]
wire out_womask_4 = &_out_womask_T_4; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_4 = out_rivalid_4 & out_rimask_4; // @[RegisterRouter.scala:87:24]
wire out_f_roready_4 = out_roready_4 & out_romask_4; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_4 = out_wivalid_4 & out_wimask_4; // @[RegisterRouter.scala:87:24]
wire out_f_woready_4 = out_woready_4 & out_womask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_43 = ~flushInValid; // @[Control.scala:45:33, :71:23]
wire _out_T_44 = out_f_wivalid_4 & _out_T_43; // @[RegisterRouter.scala:87:24]
wire out_f_wiready = ~flushInValid; // @[Control.scala:45:33, :71:23, :72:8]
wire _out_T_45 = out_f_wivalid_4 & out_f_wiready; // @[RegisterRouter.scala:87:24]
wire _out_T_46 = flushOutValid & out_f_woready_4; // @[RegisterRouter.scala:87:24]
wire _out_T_47 = ~out_rimask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_48 = ~out_wimask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_49 = out_f_wiready | _out_T_48; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_2 = _out_T_49; // @[RegisterRouter.scala:87:24]
wire _out_T_50 = ~out_romask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_51 = ~out_womask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_52 = flushOutValid | _out_T_51; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_2 = _out_T_52; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_rimask_T_5 = out_frontMask[31:0]; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_wimask_T_5 = out_frontMask[31:0]; // @[RegisterRouter.scala:87:24]
wire out_rimask_5 = |_out_rimask_T_5; // @[RegisterRouter.scala:87:24]
wire out_wimask_5 = &_out_wimask_T_5; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_romask_T_5 = out_backMask[31:0]; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_womask_T_5 = out_backMask[31:0]; // @[RegisterRouter.scala:87:24]
wire out_romask_5 = |_out_romask_T_5; // @[RegisterRouter.scala:87:24]
wire out_womask_5 = &_out_womask_T_5; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_5 = out_rivalid_5 & out_rimask_5; // @[RegisterRouter.scala:87:24]
wire out_f_roready_5 = out_roready_5 & out_romask_5; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_5 = out_wivalid_5 & out_wimask_5; // @[RegisterRouter.scala:87:24]
wire out_f_woready_5 = out_woready_5 & out_womask_5; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_55 = out_front_bits_data[31:0]; // @[RegisterRouter.scala:87:24]
assign flushOutReady = out_f_woready_5 | out_f_woready_4; // @[RegisterRouter.scala:87:24]
wire _out_T_56 = ~flushInValid; // @[Control.scala:45:33, :64:23, :71:23]
wire _out_T_57 = out_f_wivalid_5 & _out_T_56; // @[RegisterRouter.scala:87:24]
wire [35:0] _out_flushInAddress_T = {_out_T_55, 4'h0}; // @[RegisterRouter.scala:87:24]
wire out_f_wiready_1 = ~flushInValid; // @[Control.scala:45:33, :65:8, :71:23]
wire _out_T_58 = out_f_wivalid_5 & out_f_wiready_1; // @[RegisterRouter.scala:87:24]
wire _out_T_59 = flushOutValid & out_f_woready_5; // @[RegisterRouter.scala:87:24]
wire _out_T_60 = ~out_rimask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_61 = ~out_wimask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_62 = out_f_wiready_1 | _out_T_61; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_3 = _out_T_62; // @[RegisterRouter.scala:87:24]
wire _out_T_63 = ~out_romask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_64 = ~out_womask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_65 = flushOutValid | _out_T_64; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_3 = _out_T_65; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T = out_front_bits_index[0]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_1 = out_front_bits_index[1]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_2 = out_front_bits_index[2]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_3 = out_front_bits_index[3]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_4 = out_front_bits_index[4]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_5 = out_front_bits_index[5]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_6 = out_front_bits_index[6]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_7 = out_front_bits_index[7]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_8 = out_front_bits_index[8]; // @[RegisterRouter.scala:87:24]
wire [1:0] out_iindex = {_out_iindex_T_6, _out_iindex_T_3}; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T = _out_back_front_q_io_deq_bits_index[0]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_1 = _out_back_front_q_io_deq_bits_index[1]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_2 = _out_back_front_q_io_deq_bits_index[2]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_3 = _out_back_front_q_io_deq_bits_index[3]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_4 = _out_back_front_q_io_deq_bits_index[4]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_5 = _out_back_front_q_io_deq_bits_index[5]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_6 = _out_back_front_q_io_deq_bits_index[6]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_7 = _out_back_front_q_io_deq_bits_index[7]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_8 = _out_back_front_q_io_deq_bits_index[8]; // @[RegisterRouter.scala:87:24]
wire [1:0] out_oindex = {_out_oindex_T_6, _out_oindex_T_3}; // @[RegisterRouter.scala:87:24]
wire [3:0] _out_frontSel_T = 4'h1 << out_iindex; // @[OneHot.scala:58:35]
wire out_frontSel_0 = _out_frontSel_T[0]; // @[OneHot.scala:58:35]
wire out_frontSel_1 = _out_frontSel_T[1]; // @[OneHot.scala:58:35]
wire out_frontSel_2 = _out_frontSel_T[2]; // @[OneHot.scala:58:35]
wire out_frontSel_3 = _out_frontSel_T[3]; // @[OneHot.scala:58:35]
wire [3:0] _out_backSel_T = 4'h1 << out_oindex; // @[OneHot.scala:58:35]
wire out_backSel_0 = _out_backSel_T[0]; // @[OneHot.scala:58:35]
wire out_backSel_1 = _out_backSel_T[1]; // @[OneHot.scala:58:35]
wire out_backSel_2 = _out_backSel_T[2]; // @[OneHot.scala:58:35]
wire out_backSel_3 = _out_backSel_T[3]; // @[OneHot.scala:58:35]
wire _GEN_2 = in_valid & out_front_ready; // @[RegisterRouter.scala:73:18, :87:24]
wire _out_rifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T = _GEN_2; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T = _GEN_2; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_1 = _out_rifireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_2 = _out_rifireMux_T_1 & out_frontSel_0; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_3 = _out_rifireMux_T_2 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_rivalid_0 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_rivalid_1 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_rivalid_2 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_rivalid_3 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_4 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_6 = _out_rifireMux_T_1 & out_frontSel_1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_7 = _out_rifireMux_T_6; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_10 = _out_rifireMux_T_1 & out_frontSel_2; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_11 = _out_rifireMux_T_10 & _out_T_2; // @[RegisterRouter.scala:87:24]
assign out_rivalid_4 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_12 = ~_out_T_2; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_14 = _out_rifireMux_T_1 & out_frontSel_3; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_15 = _out_rifireMux_T_14 & _out_T_4; // @[RegisterRouter.scala:87:24]
assign out_rivalid_5 = _out_rifireMux_T_15; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_16 = ~_out_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_2 = _out_wifireMux_T & _out_wifireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_3 = _out_wifireMux_T_2 & out_frontSel_0; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_4 = _out_wifireMux_T_3 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_wivalid_0 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_1 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_2 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_3 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_5 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_7 = _out_wifireMux_T_2 & out_frontSel_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_8 = _out_wifireMux_T_7; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_11 = _out_wifireMux_T_2 & out_frontSel_2; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_12 = _out_wifireMux_T_11 & _out_T_2; // @[RegisterRouter.scala:87:24]
assign out_wivalid_4 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_all = _out_wifireMux_T_12 & _out_T_49; // @[ReduceOthers.scala:47:21]
wire _out_wifireMux_T_13 = ~_out_T_2; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_14 = out_wifireMux_out_2 | _out_wifireMux_T_13; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_2 = _out_wifireMux_T_14; // @[MuxLiteral.scala:49:48]
wire _out_wifireMux_T_15 = _out_wifireMux_T_2 & out_frontSel_3; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_16 = _out_wifireMux_T_15 & _out_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_5 = _out_wifireMux_T_16; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_all_1 = _out_wifireMux_T_16 & _out_T_62; // @[ReduceOthers.scala:47:21]
wire _out_wifireMux_T_17 = ~_out_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_18 = out_wifireMux_out_3 | _out_wifireMux_T_17; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_3 = _out_wifireMux_T_18; // @[MuxLiteral.scala:49:48]
wire [3:0] _GEN_3 = {{_out_wifireMux_WIRE_3}, {_out_wifireMux_WIRE_2}, {1'h1}, {1'h1}}; // @[MuxLiteral.scala:49:{10,48}]
wire out_wifireMux = _GEN_3[out_iindex]; // @[MuxLiteral.scala:49:10]
wire _GEN_4 = _out_back_front_q_io_deq_valid & out_ready; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T = _GEN_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T = _GEN_4; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_1 = _out_rofireMux_T & _out_back_front_q_io_deq_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_2 = _out_rofireMux_T_1 & out_backSel_0; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_3 = _out_rofireMux_T_2 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_roready_0 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_1 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_2 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_3 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_4 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_6 = _out_rofireMux_T_1 & out_backSel_1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_7 = _out_rofireMux_T_6; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_10 = _out_rofireMux_T_1 & out_backSel_2; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_11 = _out_rofireMux_T_10 & _out_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_4 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_12 = ~_out_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_14 = _out_rofireMux_T_1 & out_backSel_3; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_15 = _out_rofireMux_T_14 & _out_T_5; // @[RegisterRouter.scala:87:24]
assign out_roready_5 = _out_rofireMux_T_15; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_16 = ~_out_T_5; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_1 = ~_out_back_front_q_io_deq_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_2 = _out_wofireMux_T & _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_3 = _out_wofireMux_T_2 & out_backSel_0; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_4 = _out_wofireMux_T_3 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_woready_0 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_woready_1 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_woready_2 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_woready_3 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_5 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_7 = _out_wofireMux_T_2 & out_backSel_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_8 = _out_wofireMux_T_7; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_11 = _out_wofireMux_T_2 & out_backSel_2; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_12 = _out_wofireMux_T_11 & _out_T_3; // @[RegisterRouter.scala:87:24]
assign out_woready_4 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_all = _out_wofireMux_T_12 & _out_T_52; // @[ReduceOthers.scala:47:21]
wire _out_wofireMux_T_13 = ~_out_T_3; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_14 = out_wofireMux_out_2 | _out_wofireMux_T_13; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_2 = _out_wofireMux_T_14; // @[MuxLiteral.scala:49:48]
wire _out_wofireMux_T_15 = _out_wofireMux_T_2 & out_backSel_3; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_16 = _out_wofireMux_T_15 & _out_T_5; // @[RegisterRouter.scala:87:24]
assign out_woready_5 = _out_wofireMux_T_16; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_all_1 = _out_wofireMux_T_16 & _out_T_65; // @[ReduceOthers.scala:47:21]
wire _out_wofireMux_T_17 = ~_out_T_5; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_18 = out_wofireMux_out_3 | _out_wofireMux_T_17; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_3 = _out_wofireMux_T_18; // @[MuxLiteral.scala:49:48]
wire [3:0] _GEN_5 = {{_out_wofireMux_WIRE_3}, {_out_wofireMux_WIRE_2}, {1'h1}, {1'h1}}; // @[MuxLiteral.scala:49:{10,48}]
wire out_wofireMux = _GEN_5[out_oindex]; // @[MuxLiteral.scala:49:10]
wire out_iready = out_front_bits_read | out_wifireMux; // @[MuxLiteral.scala:49:10]
wire out_oready = _out_back_front_q_io_deq_bits_read | out_wofireMux; // @[MuxLiteral.scala:49:10]
assign _out_in_ready_T = out_front_ready & out_iready; // @[RegisterRouter.scala:87:24]
assign in_ready = _out_in_ready_T; // @[RegisterRouter.scala:73:18, :87:24]
assign _out_front_valid_T = in_valid & out_iready; // @[RegisterRouter.scala:73:18, :87:24]
assign out_front_valid = _out_front_valid_T; // @[RegisterRouter.scala:87:24]
wire _out_front_q_io_deq_ready_T = out_ready & out_oready; // @[RegisterRouter.scala:87:24]
assign _out_out_valid_T = _out_back_front_q_io_deq_valid & out_oready; // @[RegisterRouter.scala:87:24]
assign out_valid = _out_out_valid_T; // @[RegisterRouter.scala:87:24]
wire [3:0] _GEN_6 = {{_out_out_bits_data_WIRE_3}, {_out_out_bits_data_WIRE_2}, {1'h1}, {_out_out_bits_data_WIRE_0}}; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_1 = _GEN_6[out_oindex]; // @[MuxLiteral.scala:49:10]
wire [63:0] _out_out_bits_data_T_3 = _GEN[out_oindex]; // @[MuxLiteral.scala:49:10]
assign _out_out_bits_data_T_4 = _out_out_bits_data_T_1 ? _out_out_bits_data_T_3 : 64'h0; // @[MuxLiteral.scala:49:10]
assign out_bits_data = _out_out_bits_data_T_4; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_d_bits_size = ctrlnodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign ctrlnodeIn_d_bits_source = ctrlnodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign ctrlnodeIn_d_bits_opcode = {2'h0, _ctrlnodeIn_d_bits_opcode_T}; // @[RegisterRouter.scala:105:{19,25}]
wire _T_1 = ~io_flush_match_0 & flushInValid; // @[Control.scala:38:9, :45:33, :56:{11,27}]
always @(posedge clock) begin // @[Control.scala:38:9]
if (reset) begin // @[Control.scala:38:9]
flushInValid <= 1'h0; // @[Control.scala:45:33]
flushOutValid <= 1'h0; // @[Control.scala:47:33]
end
else begin // @[Control.scala:38:9]
flushInValid <= out_f_wivalid_5 | out_f_wivalid_4 | ~(_T_1 | io_flush_req_ready_0) & flushInValid; // @[RegisterRouter.scala:87:24]
flushOutValid <= _T_1 | io_flush_resp_0 | ~flushOutReady & flushOutValid; // @[Control.scala:38:9, :47:33, :48:34, :50:{26,42}, :51:{26,42}, :56:{27,44}, :58:21]
end
if (_out_T_57) // @[Control.scala:64:20]
flushInAddress <= {28'h0, _out_flushInAddress_T}; // @[Control.scala:46:29, :64:{55,63}]
else if (_out_T_44) // @[Control.scala:71:20]
flushInAddress <= _out_T_42; // @[RegisterRouter.scala:87:24]
  end // @[Control.scala:38:9]
TLMonitor_34 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (ctrlnodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (ctrlnodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (ctrlnodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (ctrlnodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (ctrlnodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (ctrlnodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (ctrlnodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (ctrlnodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (ctrlnodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (ctrlnodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (ctrlnodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (ctrlnodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (ctrlnodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (ctrlnodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (ctrlnodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (ctrlnodeIn_d_bits_data) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue1_RegMapperInput_i9_m8 out_back_front_q ( // @[RegisterRouter.scala:87:24]
.clock (clock),
.reset (reset),
.io_enq_ready (out_front_ready),
.io_enq_valid (out_front_valid), // @[RegisterRouter.scala:87:24]
.io_enq_bits_read (out_front_bits_read), // @[RegisterRouter.scala:87:24]
.io_enq_bits_index (out_front_bits_index), // @[RegisterRouter.scala:87:24]
.io_enq_bits_data (out_front_bits_data), // @[RegisterRouter.scala:87:24]
.io_enq_bits_mask (out_front_bits_mask), // @[RegisterRouter.scala:87:24]
.io_enq_bits_extra_tlrr_extra_source (out_front_bits_extra_tlrr_extra_source), // @[RegisterRouter.scala:87:24]
.io_enq_bits_extra_tlrr_extra_size (out_front_bits_extra_tlrr_extra_size), // @[RegisterRouter.scala:87:24]
.io_deq_ready (_out_front_q_io_deq_ready_T), // @[RegisterRouter.scala:87:24]
.io_deq_valid (_out_back_front_q_io_deq_valid),
.io_deq_bits_read (_out_back_front_q_io_deq_bits_read),
.io_deq_bits_index (_out_back_front_q_io_deq_bits_index),
.io_deq_bits_data (_out_back_front_q_io_deq_bits_data),
.io_deq_bits_mask (_out_back_front_q_io_deq_bits_mask),
.io_deq_bits_extra_tlrr_extra_source (out_bits_extra_tlrr_extra_source),
.io_deq_bits_extra_tlrr_extra_size (out_bits_extra_tlrr_extra_size)
); // @[RegisterRouter.scala:87:24]
assign out_bits_read = _out_back_front_q_io_deq_bits_read; // @[RegisterRouter.scala:87:24]
assign auto_ctrl_in_a_ready = auto_ctrl_in_a_ready_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_valid = auto_ctrl_in_d_valid_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_opcode = auto_ctrl_in_d_bits_opcode_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_size = auto_ctrl_in_d_bits_size_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_source = auto_ctrl_in_d_bits_source_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_data = auto_ctrl_in_d_bits_data_0; // @[Control.scala:38:9]
assign io_flush_req_valid = io_flush_req_valid_0; // @[Control.scala:38:9]
assign io_flush_req_bits = io_flush_req_bits_0; // @[Control.scala:38:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Serdes.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(t))
val out = Decoupled(new Flit(flitWidth))
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
data(0) := DontCare // unused, DCE this
}
}
io.busy := io.out.valid
}
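// Note: beat 0 of each message is forwarded combinationally from io.in.bits, and only
// the later beats are replayed from 'data'; that is why data(0) is tied to DontCare
// above -- it is written on the first beat but never read back.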
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(t)
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits := (if (dataBeats == 1) {
io.in.bits.flit.asTypeOf(t)
} else {
Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
})
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
}
}
}
io.busy := beat =/= 0.U
}
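// Worked sizing example: for a 67-bit payload and 32-bit flits (as in the
// GenericDeserializer_TLBeatw67_f32 instance emitted below), dataBits = max(67, 32) = 67
// and dataBeats = (67 - 1) / 32 + 1 = 3, so 'beat' counts 0..2, 'data' stores the first
// two flits, and io.out fires when the third flit arrives.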
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Phit(phitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
}
}
}
object FlitToPhit {
def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
val flit2phit = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
flit2phit.io.in <> flit
flit2phit.io.out
}
}
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Decoupled(new Flit(flitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat) := io.in.bits.phit
}
}
}
}
object PhitToFlit {
def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in <> phit
phit2flit.io.out
}
def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in.valid := phit.valid
phit2flit.io.in.bits := phit.bits
when (phit.valid) { assert(phit2flit.io.in.ready) }
val out = Wire(Valid(new Flit(flitWidth)))
out.valid := phit2flit.io.out.valid
out.bits := phit2flit.io.out.bits
phit2flit.io.out.ready := true.B
out
}
}
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
val out = Decoupled(new Phit(phitWidth))
})
if (channels == 1) {
io.out <> io.in(0)
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val chosen_reg = Reg(UInt(headerWidth.W))
val chosen_prio = PriorityEncoder(io.in.map(_.valid))
val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.out.valid := VecInit(io.in.map(_.valid))(chosen)
io.out.bits.phit := Mux(beat < headerBeats.U,
chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
VecInit(io.in.map(_.bits.phit))(chosen))
for (i <- 0 until channels) {
io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
}
when (io.out.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) { chosen_reg := chosen_prio }
}
}
}
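// Framing example: each arbitrated flit is preceded by headerBeats phits carrying the
// chosen channel index. With, say, channels = 4 and phitWidth = 1, headerWidth =
// log2Ceil(4) = 2, headerBeats = (2 - 1) / 1 + 1 = 2, and a flitWidth = 32 flit costs
// beats = 2 + 32 = 34 phit beats on the shared link.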
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Vec(channels, Decoupled(new Phit(phitWidth)))
})
if (channels == 1) {
io.out(0) <> io.in
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
for (c <- 0 until channels) {
io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
io.out(c).bits.phit := io.in.bits.phit
}
when (io.in.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat < headerBeats.U) {
channel_vec(header_idx) := io.in.bits.phit
}
}
}
}
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Flipped(Decoupled(new Flit(flitWidth)))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = io.out.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
}
io.out.valid := io.in.valid && credits < bufferSz.U
io.out.bits.flit := io.in.bits.flit
io.in.ready := io.out.ready && credits < bufferSz.U
io.credit.ready := true.B
}
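// Credit-loop note: 'credits' counts flits sent but not yet freed downstream, so both
// io.in.ready and io.out.valid are deasserted once credits reaches bufferSz. The peer
// CreditedFlitToDecoupledFlit below returns a credit flit encoding (number of freed
// entries - 1), which is why io.credit.bits.flit + 1 is subtracted here.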
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) }
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
io.credit.bits.flit := credits - 1.U
}
| module GenericDeserializer_TLBeatw67_f32( // @[Serdes.scala:37:7]
input clock, // @[Serdes.scala:37:7]
input reset, // @[Serdes.scala:37:7]
output io_in_ready, // @[Serdes.scala:39:14]
input io_in_valid, // @[Serdes.scala:39:14]
input [31:0] io_in_bits_flit, // @[Serdes.scala:39:14]
input io_out_ready, // @[Serdes.scala:39:14]
output io_out_valid, // @[Serdes.scala:39:14]
output [64:0] io_out_bits_payload, // @[Serdes.scala:39:14]
output io_out_bits_head, // @[Serdes.scala:39:14]
output io_out_bits_tail, // @[Serdes.scala:39:14]
output io_busy // @[Serdes.scala:39:14]
);
wire io_in_valid_0 = io_in_valid; // @[Serdes.scala:37:7]
wire [31:0] io_in_bits_flit_0 = io_in_bits_flit; // @[Serdes.scala:37:7]
wire io_out_ready_0 = io_out_ready; // @[Serdes.scala:37:7]
wire _io_in_ready_T_1; // @[Serdes.scala:51:31]
wire _io_out_valid_T_1; // @[Serdes.scala:52:31]
wire [64:0] _io_out_bits_WIRE_payload; // @[Serdes.scala:56:47]
wire _io_out_bits_WIRE_head; // @[Serdes.scala:56:47]
wire _io_out_bits_WIRE_tail; // @[Serdes.scala:56:47]
wire _io_busy_T; // @[Serdes.scala:68:19]
wire io_in_ready_0; // @[Serdes.scala:37:7]
wire [64:0] io_out_bits_payload_0; // @[Serdes.scala:37:7]
wire io_out_bits_head_0; // @[Serdes.scala:37:7]
wire io_out_bits_tail_0; // @[Serdes.scala:37:7]
wire io_out_valid_0; // @[Serdes.scala:37:7]
wire io_busy_0; // @[Serdes.scala:37:7]
reg [31:0] data_0; // @[Serdes.scala:48:17]
reg [31:0] data_1; // @[Serdes.scala:48:17]
reg [1:0] beat; // @[Serdes.scala:49:21]
wire _io_in_ready_T = beat != 2'h2; // @[Serdes.scala:49:21, :51:39]
assign _io_in_ready_T_1 = io_out_ready_0 | _io_in_ready_T; // @[Serdes.scala:37:7, :51:{31,39}]
assign io_in_ready_0 = _io_in_ready_T_1; // @[Serdes.scala:37:7, :51:31]
wire _GEN = beat == 2'h2; // @[Serdes.scala:49:21, :52:39]
wire _io_out_valid_T; // @[Serdes.scala:52:39]
assign _io_out_valid_T = _GEN; // @[Serdes.scala:52:39]
wire _beat_T; // @[Serdes.scala:60:22]
assign _beat_T = _GEN; // @[Serdes.scala:52:39, :60:22]
assign _io_out_valid_T_1 = io_in_valid_0 & _io_out_valid_T; // @[Serdes.scala:37:7, :52:{31,39}]
assign io_out_valid_0 = _io_out_valid_T_1; // @[Serdes.scala:37:7, :52:31]
wire [63:0] _io_out_bits_T = {data_1, data_0}; // @[Serdes.scala:48:17, :56:31]
wire [95:0] _io_out_bits_T_1 = {io_in_bits_flit_0, _io_out_bits_T}; // @[Serdes.scala:37:7, :56:{8,31}]
wire [64:0] _io_out_bits_T_4; // @[Serdes.scala:56:47]
assign io_out_bits_payload_0 = _io_out_bits_WIRE_payload; // @[Serdes.scala:37:7, :56:47]
wire _io_out_bits_T_3; // @[Serdes.scala:56:47]
assign io_out_bits_head_0 = _io_out_bits_WIRE_head; // @[Serdes.scala:37:7, :56:47]
wire _io_out_bits_T_2; // @[Serdes.scala:56:47]
assign io_out_bits_tail_0 = _io_out_bits_WIRE_tail; // @[Serdes.scala:37:7, :56:47]
wire [66:0] _io_out_bits_WIRE_1 = _io_out_bits_T_1[66:0]; // @[Serdes.scala:56:{8,47}]
assign _io_out_bits_T_2 = _io_out_bits_WIRE_1[0]; // @[Serdes.scala:56:47]
assign _io_out_bits_WIRE_tail = _io_out_bits_T_2; // @[Serdes.scala:56:47]
assign _io_out_bits_T_3 = _io_out_bits_WIRE_1[1]; // @[Serdes.scala:56:47]
assign _io_out_bits_WIRE_head = _io_out_bits_T_3; // @[Serdes.scala:56:47]
assign _io_out_bits_T_4 = _io_out_bits_WIRE_1[66:2]; // @[Serdes.scala:56:47]
assign _io_out_bits_WIRE_payload = _io_out_bits_T_4; // @[Serdes.scala:56:47]
wire [2:0] _beat_T_1 = {1'h0, beat} + 3'h1; // @[Serdes.scala:49:21, :60:53]
wire [1:0] _beat_T_2 = _beat_T_1[1:0]; // @[Serdes.scala:60:53]
wire [1:0] _beat_T_3 = _beat_T ? 2'h0 : _beat_T_2; // @[Serdes.scala:60:{16,22,53}]
assign _io_busy_T = |beat; // @[Serdes.scala:49:21, :68:19]
assign io_busy_0 = _io_busy_T; // @[Serdes.scala:37:7, :68:19]
wire _T = io_in_ready_0 & io_in_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Serdes.scala:37:7]
if (~_T | _GEN | beat[0]) begin // @[Decoupled.scala:51:35]
end
else // @[Serdes.scala:48:17, :51:39, :59:21, :62:39, :63:47]
data_0 <= io_in_bits_flit_0; // @[Serdes.scala:37:7, :48:17]
if (~_T | _GEN | ~(beat[0])) begin // @[Decoupled.scala:51:35]
end
else // @[Serdes.scala:48:17, :51:39, :59:21, :62:39, :63:47]
data_1 <= io_in_bits_flit_0; // @[Serdes.scala:37:7, :48:17]
if (reset) // @[Serdes.scala:37:7]
beat <= 2'h0; // @[Serdes.scala:49:21]
else if (_T) // @[Decoupled.scala:51:35]
beat <= _beat_T_3; // @[Serdes.scala:49:21, :60:16]
  end // @[Serdes.scala:37:7]
assign io_in_ready = io_in_ready_0; // @[Serdes.scala:37:7]
assign io_out_valid = io_out_valid_0; // @[Serdes.scala:37:7]
assign io_out_bits_payload = io_out_bits_payload_0; // @[Serdes.scala:37:7]
assign io_out_bits_head = io_out_bits_head_0; // @[Serdes.scala:37:7]
assign io_out_bits_tail = io_out_bits_tail_0; // @[Serdes.scala:37:7]
assign io_busy = io_busy_0; // @[Serdes.scala:37:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
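// Example: with numEntries = 4, numLists = 16, and numBeats = 8, the derived widths are
// entryBits = 2, listBits = 4, and beatBits = 3; the special-casing of 1 above only
// avoids zero-width fields when a dimension is exactly 1.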
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
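// Worked example of the free-ID search above: with numTlTxns = 4 and
// usedWriteIds = b0110, ~usedWriteIds = b1001, leftOR gives b1111, shifting left and
// inverting leaves b0001, and the final AND keeps b0001 -- the one-hot of the
// lowest-numbered free write ID (ID 0 here).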
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where the write ack arrives before the write burst is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
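// writeEarlyAck is asserted when the responding write ID has not yet been marked used,
// i.e. the D-channel ack raced ahead of edgeOut.done(wOut); okB (and out.d.ready for
// the no-data case) is then held off until the final A-channel beat sets that ID in
// usedWriteIds.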
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
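// Rough wiring sketch (the names 'tlXbar' and 'axi4MasterNode' are placeholders): the
// adapter usually sits between an AXI4 master and a TileLink interconnect, with an
// AXI4Fragmenter upstream so the alignment constraints checked by checkRequest above
// hold, e.g.
// tlXbar.node := UnsafeAXI4ToTL(numTlTxns = 4) := AXI4Fragmenter() := axi4MasterNode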
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset 'beatCounter'. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
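// Trace: for a stored 4-beat burst, unwindBeats1 is loaded with 3 at startNewBurst,
// nextBeatCounter then steps 0, 1, 2, 3 on successive SRAM reads, and isUnwindBurstOver
// asserts once beatCounter reaches 3, allowing the next burst (or stopUnwind) to begin.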
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved; we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_218( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
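// Illustrative usage sketch (the module and signal names below are made up, not
// part of the original file): a two-deep chain with reset value false.B whose
// registers pick up the suggested names "sync_1" (closest to the input) and
// "sync_0" (driving the output).
class ShiftRegInitExample extends Module {
val io = IO(new Bundle {
val d = Input(Bool())
val q = Output(Bool())
})
io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("sync"))
}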
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
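// Illustrative examples (the value names are made up) of the derived fields above:
object AsyncQueueParamsExamples {
val deep = AsyncQueueParams(depth = 8) // bits = log2Ceil(8) = 3, wires = 8
val deepNarrow = AsyncQueueParams(depth = 8, narrow = true) // bits = 3, wires = 1 (read mux on the source side)
val single = AsyncQueueParams.singleton() // depth = 1, bits = 0, wires = 1
}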
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
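// Worked example (illustrative): the conversion above is g = b ^ (b >> 1), so
// binary 3 (011) maps to Gray 010 and binary 4 (100) maps to Gray 110. Adjacent
// counter values differ in exactly one bit, which is what makes these pointers
// safe to synchronize across the clock crossing.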
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
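// Illustrative note: this is the usual Gray-code full test. For depth = 8
// (bits = 3), (depth | depth >> 1) = 0b1100, so XOR-ing it into 'ridx' flips the
// two MSBs; the queue is full (and 'ready' deasserts) exactly when 'widx' equals
// 'ridx' with its two most-significant bits inverted.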
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that the dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
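// Illustrative sketch (made-up module and signal names): a Decoupled[UInt]
// crossing from 'enqClock' to 'deqClock' built from the two helpers above; the
// AsyncQueue class below packages the same pattern behind CrossingIO.
class HelperCrossingExample extends RawModule {
val enqClock = IO(Input(Clock()))
val enqReset = IO(Input(Bool()))
val deqClock = IO(Input(Clock()))
val deqReset = IO(Input(Bool()))
val enq = IO(Flipped(Decoupled(UInt(8.W))))
val deq = IO(Decoupled(UInt(8.W)))
val async = withClockAndReset(enqClock, enqReset) { ToAsyncBundle(enq) }
deq <> withClockAndReset(deqClock, deqReset) { FromAsyncBundle(async) }
}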
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_124( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_145 io_out_sink_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
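// Illustrative reading of the check above: a client whose sourceId range does not
// contain 'source' places no constraint (contributes true), while the client that
// does own 'source' must have at least one visibility region containing 'address'.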
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
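// Illustrative reading: 'legal_source' requires a B-channel source to equal the
// first ('start') source ID of the client whose range contains it, matching the
// "not first source" assertions below.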
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
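// e.g. size_to_numfullbits(3.U) = (1 << 3) - 1 = 0b111; used below to build the
// per-source bit masks for the packed opcode/size tracking vectors.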
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
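// Worked example (illustrative numbers): with a_opcode_bus_size = 4
// (log_a_opcode_bus_size = 2), accepting a Get (opcode 4) from source 1 gives
// a_opcodes_set_interm = (4 << 1) | 1 = 9 and a_opcodes_set = 9 << (1 << 2) = 0x90,
// i.e. the opcode plus a set valid bit stored in the 4-bit lane for source 1.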
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
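// Editor's note: a worked sketch (not in the original source) of the per-source bookkeeping above.
// Each source ID gets an (opcode << 1 | 1) and (size << 1 | 1) field packed into inflight_opcodes /
// inflight_sizes; the low 1 bit marks the entry as set, so an all-zero field means "unset".
// e.g. with a_opcode_bus_size = 4 (log = 2), a Get (opcode 4) from source 3 stores
// a_opcodes_set_interm = (4 << 1) | 1 = 9, shifted left by 3 << 2 = 12 bits, and
// a_opcode_lookup later recovers 9 >> 1 = 4 for the matching 'D' response check.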
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
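// A usage sketch (not part of the original file): DecoupledHelper collects the ready/valid terms
// of several interfaces and fires only when all of them are asserted, excluding one term so it
// can drive that interface's own handshake signal without a combinational loop.
//   val helper = DecoupledHelper(in.valid, out.ready, okToSend)
//   out.valid := helper.fire(out.ready)   // all terms except out.ready
//   in.ready  := helper.fire(in.valid)    // all terms except in.valid
// Here `in`, `out`, and `okToSend` are hypothetical signals used only for illustration.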
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
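// A small sketch (not in the original source): Str("OK") packs the ASCII bytes big-endian into a
// 16-bit literal, 0x4f4b.U(16.W), while Str(x: UInt) renders the value as decimal ASCII digits
// padded on the left with spaces.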
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
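// A small sketch (not in the original source): Majority requires strictly more than half of the
// inputs to be set, so Majority("b110".U) is true (2 of 3) and Majority("b0011".U) is false
// (2 of 4, where 3 would be needed).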
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
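// A worked sketch (not in the original source) of the groupBy reduction above, assuming
// beatBytes = 4: MaskGen(0x2.U, 0.U, 4) yields the byte mask 0100; with groupBy = 2 the
// interleaved OR reduction pairs bytes {0,1} and {2,3}, so MaskGen(0x2.U, 0.U, 4, 2) yields 10.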
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
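// A usage sketch (not part of the original file), mirroring how TLMonitor consumes it:
//   val limit = PlusArg("tilelink_timeout", docstring = "Kill emulation after INT waiting TileLink cycles. Off if 0.")
//   assert(limit === 0.U || watchdog < limit)
// The timeout helper wires the comparison up internally (the name and counter below are hypothetical):
//   PlusArg.timeout("my_counter_limit", docstring = "hypothetical example")(counter)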
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
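// Worked examples (not in the original source) for the mask and OR-fill helpers above:
//   leftOR("b00100".U)  => "b11100".U  (1s propagate toward the high bits)
//   rightOR("b00100".U) => "b00111".U  (1s propagate toward the low bits)
//   UIntToOH1(3.U, 8)   => "b00000111".U (a mask of 2^3 - 1, i.e. one-hot minus one)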
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
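// A quick sketch (not part of the original file) of the request/response pairing encoded in
// adResponse/bcResponse above: indexing with the A-channel opcode gives the expected D opcode,
// e.g. adResponse(TLMessages.Get) === TLMessages.AccessAckData and
// adResponse(TLMessages.PutFullData) === TLMessages.AccessAck, while AcquireBlock/AcquirePerm
// map to Grant (a GrantData response is also legal for AcquireBlock).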
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path from the Trunk and holds read-only (shared) permissions.
 * (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
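// A permission-flow sketch (not in the original source): a client holding (N)one issues
// AcquireBlock with param NtoT to grow to (T)runk; the manager's Grant carries the cap toT;
// later the client may shrink by sending Release with param TtoN, or answer a Probe with
// ProbeAck TtoT to report that its permissions are unchanged.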
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return the most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
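// A small sketch (not in the original source): IdRange(4, 8) covers source IDs 4..7, so
// IdRange(4, 8).contains(6) is true, IdRange(4, 8).overlaps(IdRange(7, 10)) is true, and
// IdRange(4, 8).contains(IdRange(5, 7)) is true; its size is 4 and shift(8) gives IdRange(12, 16).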
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
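// A small sketch (not in the original source): TransferSizes(4, 64).intersect(TransferSizes(16, 256))
// is TransferSizes(16, 64), while TransferSizes(4, 8).mincover(TransferSizes(64, 64)) is
// TransferSizes(4, 64) -- which, as noted above, also admits sizes (16, 32) contained by neither term.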
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask selects the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
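// A small sketch (not in the original source), continuing the examples in the header comment:
// AddressSet(0x200, 0xff).contains(0x2a4) is true, its alignment is 0x100, and it overlaps
// AddressSet(0x280, 0x7f); AddressSet(0x1000, 0xf0f).contains(0x1108) is true because only the
// bits selected by the mask may differ from the base.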
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
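  // Burst tracking: firstlastHelper instantiates a beat counter per call and
  // returns (first, last, done, count) for the message on `bits`, advancing
  // the counter on every cycle in which `fire` is asserted.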
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
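  // For example, PutFullData and AcquireBlock with NtoT/BtoT need T permissions,
  // whereas Get and AcquireBlock with NtoB do not.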
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
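  // Illustrative use (identifiers below are placeholders, not part of this file):
  //   val (flight, next_flight) = edge.inFlight(tlBundle)
  //   val link_idle = next_flight === 0.U  // e.g. a safe point to gate the link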
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
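// Illustrative client-side use of the TLEdgeOut constructors (identifiers are
// placeholders, not part of this file). Request constructors return a
// (legal, bits) pair; response constructors return the bundle directly.
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   out.a.valid := wantRead && legal
//   out.a.bits  := get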
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
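// Illustrative manager-side use of the TLEdgeIn constructors (identifiers are
// placeholders, not part of this file): build an AccessAckData response to an
// incoming Get on channel A.
//   val rsp = edge.AccessAck(in.a.bits, readData)
//   in.d.bits := rsp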
| module TLMonitor_21( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [25:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [25:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_47 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_first_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_first_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_first_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_first_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_set_wo_ready_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_set_wo_ready_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_opcodes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_opcodes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_sizes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_sizes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_opcodes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_opcodes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_sizes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_sizes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_probe_ack_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_probe_ack_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_probe_ack_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_probe_ack_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _same_cycle_resp_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _same_cycle_resp_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _same_cycle_resp_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _same_cycle_resp_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _same_cycle_resp_WIRE_4_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _same_cycle_resp_WIRE_5_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31]
wire _source_ok_T_28 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31]
wire _source_ok_T_29 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_34 = _source_ok_T_33 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_35 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [25:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 26'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_36 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_37 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_43 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_38 = _source_ok_T_37 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_42; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_44 = _source_ok_T_43 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_48 = _source_ok_T_46; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_48; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_54; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_61; // @[Parameters.scala:1138:31]
wire _source_ok_T_62 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_62; // @[Parameters.scala:1138:31]
wire _source_ok_T_63 = io_in_d_bits_source_0 == 7'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_63; // @[Parameters.scala:1138:31]
wire _source_ok_T_64 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_64; // @[Parameters.scala:1138:31]
wire _source_ok_T_65 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_68 = _source_ok_T_67 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_69 = _source_ok_T_68 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_70 = _source_ok_T_69 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_71 = _source_ok_T_70 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_71 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1101 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1101; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1101; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [25:0] address; // @[Monitor.scala:391:22]
wire _T_1174 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1174; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1174; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1174; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1027 = _T_1101 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1027 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1027 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1027 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1027 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1027 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1073 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1073 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1042 = _T_1174 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1042 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1042 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1042 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1145 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1145 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1127 = _T_1174 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1127 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1127 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1127 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
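// Illustrative usage sketch (hypothetical example, not from the original file):
// delay a one-bit strobe by three cycles through reset-to-zero registers that
// will be suggested the names strobe_pipe_0 .. strobe_pipe_2.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := ShiftRegInit(io.d, n = 3, init = false.B, name = Some("strobe_pipe"))
}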
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
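// Illustrative usage sketch (hypothetical example, not from the original file):
// pipeline an 8-bit value through a two-deep, asynchronously reset chain; the
// name hint only renames the generated wrapper instance.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val q = Output(UInt(8.W))
  })
  io.q := AsyncResetShiftReg(io.d, depth = 2, init = 0, name = Some("pipe"))
}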
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
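// Illustrative usage sketch (hypothetical example, not from the original file):
// bring an asynchronous interrupt pin into the local clock domain through a
// three-deep synchronizer whose flops reset asynchronously to 0.
class AsyncIrqSyncExample extends Module {
  val irqAsync = IO(Input(Bool()))
  val irqSync  = IO(Output(Bool()))
  irqSync := AsyncResetSynchronizerShiftReg(irqAsync, sync = 3, init = 0)
}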
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
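// Illustrative usage sketch (hypothetical example, not from the original file):
// capture a 4-bit bus under an enable strobe when crossing into this clock
// domain; doInit = true gives the capture register a reset value of 0.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(4.W))
    val en = Input(Bool())
    val q  = Output(UInt(4.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = true, name = Some("cdc_capture"))
}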
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_25( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
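// Worked example (illustrative): ReservableListBufferParameters(numEntries = 16, numLists = 4, numBeats = 8)
// yields entryBits = 4, listBits = 2 and beatBits = 3; the degenerate cases
// (numEntries, numLists or numBeats equal to 1) keep the corresponding width at
// 1 bit instead of the zero width that log2Ceil(1) would imply.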
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
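// Worked example (illustrative): with numTlTxns = 16 the TileLink source is
// Cat(rw, txnIndex(3,0)), so read transaction 5 issues with source 5 (0b0_0101)
// while write transaction 5 issues with source 21 (0b1_0101).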
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
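// Worked example (illustrative): with numTlTxns = 4 and usedWriteIds = b0101,
// ~usedWriteIds = b1010, leftOR gives b1110, shifting left and truncating gives
// b1100, inverting gives b0011, and ANDing with b1010 leaves b0010: the one-hot
// of the lowest free write ID (index 1).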
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
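// Worked example (illustrative): for a four-beat write burst, AW and the first
// W beat can be valid in the same cycle; W beats fire on beats 0..3 as wOut
// accepts them, and AW only fires together with the last W beat, so the address
// and size taken from AW stay stable across the whole burst.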
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
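// Illustrative wiring sketch (hypothetical; assumes a surrounding LazyModule with
// an AXI4 master node `axiNode` and a TileLink sink node `tlNode` in scope):
//
//   tlNode := UnsafeAXI4ToTL(numTlTxns = 4) := AXI4Fragmenter() := axiNode
//
// An AXI4Fragmenter (or an equivalent adapter) should sit upstream to satisfy the
// alignment constraints asserted in checkRequest, and a TLError device must be
// reachable downstream so unsupported requests can be redirected to it.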
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
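// Worked example (illustrative): with numEntries = 4 and used = b0101, ~used = b1010 and
// leftOR(~used) = b1110. Shifted left by one (and truncated to numEntries bits) that is b1100;
// inverted it is b0011; ANDed with ~used it leaves b0010, so freeIndex = 1, the lowest-index
// free entry.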
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
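// Illustrative scenario: suppose list L received beats for entries e1 and e2 out of order, so e2's
// data is already stashed in the 'data' SRAM when e1's final in-order beat flows straight to
// 'ioDataOut'. That beat asserts 'startUnwind' (e1 is not the end of the list and e2's data is
// present); the FSM then streams e2 out of the SRAM and stops once it reaches either a missing
// successor (!nextDataIsPresent) or the tail of the list (isEndOfList).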
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
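// Illustrative example: for a stored 4-beat burst, 'beats' holds 3, so 'unwindBeats1' is loaded
// with 3 at the start of the burst and 'beatCounter' steps 0, 1, 2, 3 on successive SRAM reads;
// 'isUnwindBurstOver' asserts on the beat where beatCounter === 3.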
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
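/** Illustrative usage sketch, not part of the original file: a thin wrapper showing how the three
* handshakes of [[ReservableListBuffer]] are typically wired by a parent module. The element type
* 'gen' and 'params' are placeholders supplied by the caller; nothing here is specific to the
* AXI4-to-TileLink converter above.
*/
class ReservableListBufferUsageSketch[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
val io = IO(new Bundle {
val reserve = Flipped(Decoupled(UInt(params.listBits.W)))
val reservedIndex = Output(UInt(params.entryBits.W))
val response = Flipped(Decoupled(new ResponsePayload(gen, params)))
val dataOut = Decoupled(new DataOutPayload(gen, params))
})
val buf = Module(new ReservableListBuffer(gen, params))
// 1. Reserve an entry in list 'io.reserve.bits'; the granted entry index is reported on 'ioReservedIndex'.
buf.ioReserve <> io.reserve
io.reservedIndex := buf.ioReservedIndex
// 2. Response beats for reserved entries may come back out of order; the buffer stashes them as needed.
buf.ioResponse <> io.response
// 3. Beats are read out strictly in per-list order.
io.dataOut <> buf.ioDataOut
}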
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved; we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_46( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
* a, b, d, and the control/id/last/valid signals are broadcast across the entire array and are passed through to the Tile's outputs
* @param inputType The data type of the 'a' operands
* @param outputType The data type of the 'b', 'c', and 'd' operands
* @param accType The data type used for accumulation inside each PE
* @param rows Number of PE rows in the Tile
* @param columns Number of PE columns in the Tile
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
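// Illustrative expansion of the foldLeft above: with columns = 2, row r is wired as
//   tile(r)(0).io.in_a := io.in_a(r)
//   tile(r)(1).io.in_a := tile(r)(0).io.out_a
// so 'a' is passed from each PE to its right-hand neighbour. The vertical broadcasts below thread
// b, d, control, id, last, and valid down each column in the same way.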
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
// Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
| module Tile_140( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0 // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
PE_396 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
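// Illustrative encoding example: with sizeBits = 3, a_size_bus_size = 4, so each source gets a
// 4-bit slot in 'inflight_sizes'. A recorded size s is stored as (s << 1) | 1; the low bit marks
// the slot as valid, so an all-zero slot unambiguously means "no request outstanding".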
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
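  // Worked example of the packed bookkeeping used above (values follow from the code itself,
  // not from any external spec): each source gets a (3+1)-bit opcode slot and a (sizeBits+1)-bit
  // size slot, stored as (value << 1) | 1 so that an all-zero slot means "nothing in flight".
  // With log_a_opcode_bus_size = 2, a Get (opcode 4) from source 2 is recorded as
  // ((4 << 1) | 1) << (2 << 2) = 0x900; the lookup masks out that 4-bit slot and shifts right
  // by one, recovering opcode 4, for which responseMap(4) = AccessAckData.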
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
  * initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
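// Example usage (a minimal sketch; "max_cycles", "verbose" and the surrounding Module are
// hypothetical and not defined in this file):
//   class CycleLimiter extends Module {
//     val cycle = RegInit(0.U(32.W))
//     cycle := cycle + 1.U
//     // +verbose=1 on the simulator command line enables extra debug behavior
//     val verbose = PlusArg("verbose", default = 0, docstring = "Enable verbose printf") =/= 0.U
//     // +max_cycles=N kills the simulation after N cycles; the default of 0 never fires
//     PlusArg.timeout("max_cycles", default = 0, docstring = "Stop after N cycles")(cycle)
//   }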
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
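  // For reference, values worked out from the definitions above (4-element Seq):
  //   Seq(a, b, c, d).rotate(1)      == Seq(b, c, d, a)   // drop(1) ++ take(1)
  //   Seq(a, b, c, d).rotateRight(1) == Seq(d, a, b, c)   // takeRight(1) ++ dropRight(1)
  // The UInt-shift variants build a log2(size)-deep mux ladder and therefore require a power-of-2 size.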
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
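  // For reference, values worked out from the definitions above:
  //   "FooBar Baz".underscore   == "foo_bar_baz"
  //   "Foo_Bar baz".kebab       == "foo-bar-baz"
  //   "bus".named(Some("core")) == "bus_named_core";  "bus".named(None) == "bus_with_no_name"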
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
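  // For reference, a few values worked out from the definitions above:
  //   "b0011".U(4.W).rotateRight(1) == "b1001".U      // Cat(x(0), x >> 1)
  //   9.U(4.W).addWrap(5.U, 12)     == 2.U            // (9 + 5) % 12, both operands < 12
  //   "b1010".U(4.W).sextTo(8)      == "b11111010".U  // replicate the sign bit into the new MSBs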
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
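  // For reference: leftOR propagates each set bit toward the MSB, rightOR toward the LSB, e.g.
  //   leftOR("b00100".U(5.W))  == "b11100".U
  //   rightOR("b00100".U(5.W)) == "b00111".U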
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
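  // For reference, unlike groupBy the result preserves first-appearance key order:
  //   groupByIntoSeq(Seq("apple", "banana", "avocado"))(_.head)
  //     == Seq(('a', Seq("apple", "avocado")), ('b', Seq("banana")))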
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, "Id ranges cannot have negative size.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
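// For reference, values worked out from the definitions above:
//   TransferSizes(4, 16).intersect(TransferSizes(8, 64)) == TransferSizes(8, 16)
//   TransferSizes(1, 2).mincover(TransferSizes(8, 16))   == TransferSizes(1, 16) // contains 4, which neither term does
//   TransferSizes(4, 16).containsLg(3)                    == true                // 1 << 3 = 8 lies in [4, 16]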
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
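// For reference, with the base/mask convention above:
//   AddressSet(0x1000, 0xfff) covers 0x1000-0x1fff, so contains(0x1800) is true and contains(0x2000) is false
//   AddressSet(0x1000, 0xf0f).contains(0x1015) is false               // bit 4 is a "care" bit and differs from the base
//   AddressSet(0x1000, 0xfff).widen(0x1000)    == AddressSet(0x0, 0x1fff)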
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
    if (from ne to) { // distinct from/to ranges use the 7-argument format string; a subclass that reuses the same reference for both gets the 5-argument form below
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
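  // Worked example of the beat arithmetic above: with manager.beatBytes = 8, a PutFullData of
  // lgSize = 5 (32 bytes) has numBeats = 4 and numBeats1 = 3, so firstlastHelper flags `first`
  // on beat 0 and `last` on beat 3; messages without a data payload (e.g. Get on channel A)
  // always count as a single beat regardless of size.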
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
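// Editorial sketch (not part of the upstream source): a minimal TL-UL client showing how the
// TLEdgeOut helpers above are typically used. The module name, address, and size are
// hypothetical, and the usual rocket-chip diplomacy/tilelink imports are assumed in scope.
class ExampleGetClient(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example-get-client", sourceId = IdRange(0, 1))))))
  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out.head
    // Build an 8-byte Get; `legal` is an elaboration-time legality check folded into hardware.
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x80000000L.U, lgSize = 3.U)
    tl.a.valid := legal        // sketch only: real designs gate this on their own request logic
    tl.a.bits  := getBits
    tl.d.ready := true.B       // always accept responses in this sketch
    // Tie off the channels a TL-UL client never drives.
    tl.b.ready := true.B
    tl.c.valid := false.B
    tl.c.bits  := DontCare
    tl.e.valid := false.B
    tl.e.bits  := DontCare
  }
}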
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
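  // Editorial note (not in the upstream source): myTranspose is a ragged-row transpose.
  // Unlike Seq.transpose it tolerates rows of unequal length by dropping exhausted rows,
  // e.g. Seq(Seq(1, 2), Seq(3)) becomes Seq(Seq(1, 3), Seq(2)).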
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
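// Editorial sketch (not part of the upstream source): how a simple register-style manager can
// answer channel-A requests on channel D with the TLEdgeIn helpers above. It assumes
// `tl: TLBundle` and `edge: TLEdgeIn` were obtained from a manager node's `node.in.head`, and
// `readData: UInt` is whatever the manager reads back; combinational handshaking only.
//
//   tl.d.valid := tl.a.valid
//   tl.a.ready := tl.d.ready
//   tl.d.bits  := Mux(tl.a.bits.opcode === TLMessages.Get,
//     edge.AccessAck(tl.a.bits, readData),   // AccessAckData for Gets
//     edge.AccessAck(tl.a.bits))             // plain AccessAck for Puts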
| module TLMonitor_13( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [9:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [9:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [3:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [9:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
reg [3:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [9:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [512:0] inflight; // @[Monitor.scala:614:27]
reg [2051:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [2051:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [3:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 4'h0; // @[Edges.scala:229:27, :231:25]
reg [3:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 4'h0; // @[Edges.scala:229:27, :231:25]
wire [1023:0] _GEN_0 = {1014'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [1023:0] _GEN_3 = {1014'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [512:0] inflight_1; // @[Monitor.scala:726:35]
reg [2051:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [3:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 4'h0; // @[Edges.scala:229:27, :231:25]
  reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
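// Editorial sketch (not in the upstream file): delaying a Bool by two cycles with a reset
// value of false.B and suggested register names "example_pipe_0"/"example_pipe_1".
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("example_pipe"))
}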
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
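// Editorial sketch (not in the upstream file): a 4-deep, asynchronously reset pipeline on a
// 4-bit bus, used for retiming/latency rather than CDC synchronization.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val q = Output(UInt(4.W))
  })
  io.q := AsyncResetShiftReg(io.d, depth = 4, init = 0, name = Some("retime_pipe"))
}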
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
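// Editorial sketch (not in the upstream file): bringing an asynchronous level signal into the
// local clock domain through a 3-deep, reset-to-0 synchronizer chain.
class AsyncLevelSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("level_sync"))
}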
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
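// Editorial sketch (not in the upstream file): capturing a quasi-static 8-bit configuration
// word into this clock domain; the enable is assumed to already be synchronized here.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val en = Input(Bool())
    val q = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = false, name = Some("cfg_cdc"))
}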
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_351( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File bim.scala:
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v4.common._
import boom.v4.util.{BoomCoreStringPrefix, WrapInc}
import scala.math.min
class BIMMeta(implicit p: Parameters) extends BoomBundle()(p)
with HasBoomFrontendParameters
{
val bims = Vec(bankWidth, UInt(2.W))
}
case class BoomBIMParams(
nSets: Int = 2048,
nCols: Int = 8,
singlePorted: Boolean = true,
useFlops: Boolean = false,
slow: Boolean = false
)
class BIMBranchPredictorBank(params: BoomBIMParams = BoomBIMParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
override val nSets = params.nSets
val nCols = params.nCols
val nSetsPerCol = nSets / nCols
require(isPow2(nSets))
require(isPow2(nCols))
require(nCols < nSets)
require(nCols > 1)
val nWrBypassEntries = 2
def bimWrite(v: UInt, taken: Bool): UInt = {
val old_bim_sat_taken = v === 3.U
val old_bim_sat_ntaken = v === 0.U
Mux(old_bim_sat_taken && taken, 3.U,
Mux(old_bim_sat_ntaken && !taken, 0.U,
Mux(taken, v + 1.U, v - 1.U)))
}
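  // Worked example (editorial note, not in the upstream source): bimWrite is a 2-bit
  // saturating counter. From v = 2 ("weakly taken"): taken -> 3, not-taken -> 1. The
  // saturation cases hold 3 at 3 on taken and 0 at 0 on not-taken, so the counter never wraps.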
val s2_meta = Wire(new BIMMeta)
override val metaSz = s2_meta.asUInt.getWidth
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nSetsPerCol).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nSetsPerCol-1).U) { doing_reset := false.B }
val mems = (0 until nCols) map {c => (f"bim_col$c", nSetsPerCol, bankWidth * 2)}
val s0_col_mask = UIntToOH(s0_idx(log2Ceil(nCols)-1,0)) & Fill(nCols, s0_valid)
val s1_col_mask = RegNext(s0_col_mask)
val s0_col_idx = s0_idx >> log2Ceil(nCols)
val s1_col_idx = RegNext(s0_col_idx)
val s2_req_rdata_all = Wire(Vec(nCols, Vec(bankWidth, UInt(2.W))))
val s2_req_rdata = Mux1H(RegNext(s1_col_mask), s2_req_rdata_all)
val s2_resp = Wire(Vec(bankWidth, Bool()))
for (w <- 0 until bankWidth) {
s2_resp(w) := s2_valid && s2_req_rdata(w)(1) && !doing_reset
s2_meta.bims(w) := s2_req_rdata(w)
if (!params.slow) {
io.resp.f2(w).taken := s2_resp(w)
}
io.resp.f3(w).taken := RegNext(s2_resp(w))
}
io.f3_meta := RegNext(s2_meta.asUInt)
val s1_update_wdata = Wire(Vec(bankWidth, UInt(2.W)))
val s1_update_wmask = Wire(Vec(bankWidth, Bool()))
val s1_update_meta = s1_update.bits.meta.asTypeOf(new BIMMeta)
val s1_update_col_mask = UIntToOH(s1_update_idx(log2Ceil(nCols)-1,0))
val s1_update_col_idx = s1_update_idx >> log2Ceil(nCols)
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nSets).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(2.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_idxs(i) === s1_update_idx(log2Ceil(nSets)-1,0)
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
s1_update_wmask(w) := false.B
s1_update_wdata(w) := DontCare
val update_pc = s1_update.bits.pc + (w << 1).U
when (s1_update.bits.br_mask(w) ||
(s1_update.bits.cfi_idx.valid && s1_update.bits.cfi_idx.bits === w.U)) {
val was_taken = (
s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
(
(s1_update.bits.cfi_is_br && s1_update.bits.br_mask(w) && s1_update.bits.cfi_taken) ||
s1_update.bits.cfi_is_jal
)
)
val old_bim_value = Mux(wrbypass_hit, wrbypass(wrbypass_hit_idx)(w), s1_update_meta.bims(w))
s1_update_wmask(w) := true.B
s1_update_wdata(w) := bimWrite(old_bim_value, was_taken)
}
}
for (c <- 0 until nCols) {
val rdata = Wire(Vec(bankWidth, UInt(2.W)))
rdata := DontCare
val (ren, ridx) = if (params.slow) (s1_col_mask(c), s1_col_idx) else (s0_col_mask(c), s0_col_idx)
val wen = WireInit(doing_reset || (s1_update.valid && s1_update.bits.is_commit_update && s1_update_col_mask(c) && !ren))
if (params.slow) {
s2_req_rdata_all(c) := rdata
} else {
s2_req_rdata_all(c) := RegNext(rdata)
}
if (params.useFlops) {
val data = Reg(Vec(nSetsPerCol, Vec(bankWidth, UInt(2.W))))
when (wen && doing_reset) {
data(reset_idx) := VecInit(Seq.fill(bankWidth) { 2.U })
} .elsewhen (wen) {
for (i <- 0 until bankWidth) {
when (s1_update_wmask(i)) {
data(s1_update_col_idx)(i) := s1_update_wdata(i)
}
}
}
when (RegNext(ren) && !(wen && params.singlePorted.B)) {
rdata := data(RegNext(ridx))
}
} else {
val data = SyncReadMem(nSetsPerCol, Vec(bankWidth, UInt(2.W)))
data.suggestName(s"bim_col_${c}")
val r = if (params.singlePorted) data.read(ridx, ren && !wen) else data.read(ridx, ren)
rdata := r
when (wen) {
val widx = Mux(doing_reset, reset_idx, s1_update_col_idx)
val wdata = Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 2.U }), s1_update_wdata)
val wmask = Mux(doing_reset, (~(0.U(bankWidth.W))), s1_update_wmask.asUInt)
data.write(widx, wdata, wmask.asBools)
}
}
}
when (s1_update_wmask.reduce(_||_) && s1_update.valid && s1_update.bits.is_commit_update) {
when (wrbypass_hit) {
wrbypass(wrbypass_hit_idx) := s1_update_wdata
} .otherwise {
wrbypass(wrbypass_enq_idx) := s1_update_wdata
wrbypass_idxs(wrbypass_enq_idx) := s1_update_idx
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
}
| module bim_col_1( // @[bim.scala:157:29]
input [7:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [7:0] RW0_wdata,
output [7:0] RW0_rdata,
input [3:0] RW0_wmask
);
bim_col_0_ext bim_col_0_ext ( // @[bim.scala:157:29]
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata),
.RW0_wmask (RW0_wmask)
); // @[bim.scala:157:29]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ResetCatchAndSync.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.{withClockAndReset, withReset}
/** Reset: asynchronous assert,
* synchronous de-assert
*
*/
class ResetCatchAndSync (sync: Int = 3) extends Module {
override def desiredName = s"ResetCatchAndSync_d${sync}"
val io = IO(new Bundle {
val sync_reset = Output(Bool())
val psd = Input(new PSDTestMode())
})
  // Bypass the reset both at the flops themselves (to prevent DFT holes on
  // those flops) and at the output of the synchronizer circuit (to control
  // reset to any flops this circuit drives).
val post_psd_reset = Mux(io.psd.test_mode, io.psd.test_mode_reset, reset.asBool)
withReset(post_psd_reset) {
io.sync_reset := Mux(io.psd.test_mode, io.psd.test_mode_reset,
~AsyncResetSynchronizerShiftReg(true.B, sync))
}
}
object ResetCatchAndSync {
def apply(clk: Clock, rst: Bool, sync: Int = 3, name: Option[String] = None,
psd: Option[PSDTestMode] = None): Bool = {
withClockAndReset(clk, rst) {
val catcher = Module (new ResetCatchAndSync(sync))
if (name.isDefined) {catcher.suggestName(name.get)}
catcher.io.psd <> psd.getOrElse(WireDefault(0.U.asTypeOf(new PSDTestMode())))
catcher.io.sync_reset
}
}
def apply(clk: Clock, rst: Bool, sync: Int, name: String): Bool = apply(clk, rst, sync, Some(name))
def apply(clk: Clock, rst: Bool, name: String): Bool = apply(clk, rst, name = Some(name))
def apply(clk: Clock, rst: Bool, sync: Int, name: String, psd: PSDTestMode): Bool =
apply(clk, rst, sync, Some(name), Some(psd))
def apply(clk: Clock, rst: Bool, name: String, psd: PSDTestMode): Bool =
apply(clk, rst, name = Some(name), psd = Some(psd))
}
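// Editorial sketch (not in the upstream file): producing a synchronously de-asserting reset
// for a destination clock domain from an asynchronous reset request.
class ResetCatchAndSyncExample extends RawModule {
  val clk = IO(Input(Clock()))
  val async_rst = IO(Input(Bool()))
  val sync_rst = IO(Output(Bool()))
  sync_rst := ResetCatchAndSync(clk, async_rst, 3, "example_reset_sync")
}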
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File PeripheryTLSerial.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.subsystem._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.prci._
import testchipip.util.{ClockedIO}
import testchipip.soc.{OBUS}
// Parameters for a read-only-memory that appears over serial-TL
case class ManagerROMParams(
address: BigInt = 0x20000,
size: Int = 0x10000,
contentFileName: Option[String] = None) // If unset, generates a JALR to DRAM_BASE
// Parameters for a read/write memory that appears over serial-TL
case class ManagerRAMParams(
address: BigInt,
size: BigInt)
// Parameters for a coherent cacheable read/write memory that appears over serial-TL
case class ManagerCOHParams(
address: BigInt,
size: BigInt)
// Parameters for a set of memory regions that appear over serial-TL
case class SerialTLManagerParams(
memParams: Seq[ManagerRAMParams] = Nil,
romParams: Seq[ManagerROMParams] = Nil,
cohParams: Seq[ManagerCOHParams] = Nil,
isMemoryDevice: Boolean = false,
sinkIdBits: Int = 8,
totalIdBits: Int = 8,
cacheIdBits: Int = 2,
slaveWhere: TLBusWrapperLocation = OBUS
)
// Parameters for a TL client which may probe this system over serial-TL
case class SerialTLClientParams(
totalIdBits: Int = 8,
cacheIdBits: Int = 2,
masterWhere: TLBusWrapperLocation = FBUS,
supportsProbe: Boolean = false
)
// The SerialTL can be configured to be bidirectional if serialTLManagerParams is set
case class SerialTLParams(
client: Option[SerialTLClientParams] = None,
manager: Option[SerialTLManagerParams] = None,
phyParams: SerialPhyParams = ExternalSyncSerialPhyParams(),
bundleParams: TLBundleParameters = TLSerdesser.STANDARD_TLBUNDLE_PARAMS)
case object SerialTLKey extends Field[Seq[SerialTLParams]](Nil)
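// Editorial sketch (hypothetical, not in the upstream file): a config fragment enabling one
// serial-TL link with an internally generated clock and a manager-side RAM region. The
// address/size values are illustrative, the InternalSyncSerialPhyParams fields other than
// freqMHz are left at their assumed defaults, and Config is fully qualified because this file
// does not import it.
class WithExampleSerialTL extends org.chipsalliance.cde.config.Config((site, here, up) => {
  case SerialTLKey => Seq(SerialTLParams(
    client = Some(SerialTLClientParams()),
    manager = Some(SerialTLManagerParams(
      memParams = Seq(ManagerRAMParams(address = 0x100000000L, size = 0x10000000L)))),
    phyParams = InternalSyncSerialPhyParams(freqMHz = 100)))
})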
trait CanHavePeripheryTLSerial { this: BaseSubsystem =>
private val portName = "serial-tl"
val tlChannels = 5
val (serdessers, serial_tls, serial_tl_debugs) = p(SerialTLKey).zipWithIndex.map { case (params, sid) =>
val name = s"serial_tl_$sid"
lazy val manager_bus = params.manager.map(m => locateTLBusWrapper(m.slaveWhere))
lazy val client_bus = params.client.map(c => locateTLBusWrapper(c.masterWhere))
val clientPortParams = params.client.map { c => TLMasterPortParameters.v1(
clients = Seq.tabulate(1 << c.cacheIdBits){ i => TLMasterParameters.v1(
name = s"serial_tl_${sid}_${i}",
sourceId = IdRange(i << (c.totalIdBits - c.cacheIdBits), (i + 1) << (c.totalIdBits - c.cacheIdBits)),
supportsProbe = if (c.supportsProbe) TransferSizes(client_bus.get.blockBytes, client_bus.get.blockBytes) else TransferSizes.none
)}
)}
val managerPortParams = params.manager.map { m =>
val memParams = m.memParams
val romParams = m.romParams
val cohParams = m.cohParams
val memDevice = if (m.isMemoryDevice) new MemoryDevice else new SimpleDevice("lbwif-readwrite", Nil)
val romDevice = new SimpleDevice("lbwif-readonly", Nil)
val blockBytes = manager_bus.get.blockBytes
TLSlavePortParameters.v1(
managers = memParams.map { memParams => TLSlaveParameters.v1(
address = AddressSet.misaligned(memParams.address, memParams.size),
resources = memDevice.reg,
regionType = RegionType.UNCACHED, // cacheable
executable = true,
supportsGet = TransferSizes(1, blockBytes),
supportsPutFull = TransferSizes(1, blockBytes),
supportsPutPartial = TransferSizes(1, blockBytes)
)} ++ romParams.map { romParams => TLSlaveParameters.v1(
address = List(AddressSet(romParams.address, romParams.size-1)),
resources = romDevice.reg,
regionType = RegionType.UNCACHED, // cacheable
executable = true,
supportsGet = TransferSizes(1, blockBytes),
fifoId = Some(0)
)} ++ cohParams.map { cohParams => TLSlaveParameters.v1(
address = AddressSet.misaligned(cohParams.address, cohParams.size),
regionType = RegionType.TRACKED, // cacheable
executable = true,
supportsAcquireT = TransferSizes(1, blockBytes),
supportsAcquireB = TransferSizes(1, blockBytes),
supportsGet = TransferSizes(1, blockBytes),
supportsPutFull = TransferSizes(1, blockBytes),
supportsPutPartial = TransferSizes(1, blockBytes)
)},
beatBytes = manager_bus.get.beatBytes,
endSinkId = if (cohParams.isEmpty) 0 else (1 << m.sinkIdBits),
minLatency = 1
)
}
val serial_tl_domain = LazyModule(new ClockSinkDomain(name=Some(s"SerialTL$sid")))
serial_tl_domain.clockNode := manager_bus.getOrElse(client_bus.get).fixedClockNode
if (manager_bus.isDefined) require(manager_bus.get.dtsFrequency.isDefined,
s"Manager bus ${manager_bus.get.busName} must provide a frequency")
if (client_bus.isDefined) require(client_bus.get.dtsFrequency.isDefined,
s"Client bus ${client_bus.get.busName} must provide a frequency")
if (manager_bus.isDefined && client_bus.isDefined) {
val managerFreq = manager_bus.get.dtsFrequency.get
val clientFreq = client_bus.get.dtsFrequency.get
require(managerFreq == clientFreq, s"Mismatching manager freq $managerFreq != client freq $clientFreq")
}
val serdesser = serial_tl_domain { LazyModule(new TLSerdesser(
flitWidth = params.phyParams.flitWidth,
clientPortParams = clientPortParams,
managerPortParams = managerPortParams,
bundleParams = params.bundleParams,
nameSuffix = Some(name)
)) }
serdesser.managerNode.foreach { managerNode =>
val maxClients = 1 << params.manager.get.cacheIdBits
val maxIdsPerClient = 1 << (params.manager.get.totalIdBits - params.manager.get.cacheIdBits)
manager_bus.get.coupleTo(s"port_named_${name}_out") {
(managerNode
:= TLProbeBlocker(p(CacheBlockBytes))
:= TLSourceAdjuster(maxClients, maxIdsPerClient)
:= TLSourceCombiner(maxIdsPerClient)
:= TLWidthWidget(manager_bus.get.beatBytes)
:= _)
}
}
serdesser.clientNode.foreach { clientNode =>
client_bus.get.coupleFrom(s"port_named_${name}_in") { _ := TLBuffer() := clientNode }
}
// If we provide a clock, generate a clock domain for the outgoing clock
val serial_tl_clock_freqMHz = params.phyParams match {
case params: InternalSyncSerialPhyParams => Some(params.freqMHz)
case params: ExternalSyncSerialPhyParams => None
case params: SourceSyncSerialPhyParams => Some(params.freqMHz)
}
val serial_tl_clock_node = serial_tl_clock_freqMHz.map { f =>
serial_tl_domain { ClockSinkNode(Seq(ClockSinkParameters(take=Some(ClockParameters(f))))) }
}
serial_tl_clock_node.foreach(_ := ClockGroup()(p, ValName(s"${name}_clock")) := allClockGroupsNode)
val inner_io = serial_tl_domain { InModuleBody {
val inner_io = IO(params.phyParams.genIO).suggestName(name)
inner_io match {
case io: InternalSyncPhitIO => {
// Outer clock comes from the clock node. Synchronize the serdesser's reset to that
// clock to get the outer reset
val outer_clock = serial_tl_clock_node.get.in.head._1.clock
io.clock_out := outer_clock
val phy = Module(new DecoupledSerialPhy(tlChannels, params.phyParams))
phy.io.outer_clock := outer_clock
phy.io.outer_reset := ResetCatchAndSync(outer_clock, serdesser.module.reset.asBool)
phy.io.inner_clock := serdesser.module.clock
phy.io.inner_reset := serdesser.module.reset
phy.io.outer_ser <> io.viewAsSupertype(new DecoupledPhitIO(io.phitWidth))
phy.io.inner_ser <> serdesser.module.io.ser
}
case io: ExternalSyncPhitIO => {
// Outer clock comes from the IO. Synchronize the serdesser's reset to that
// clock to get the outer reset
val outer_clock = io.clock_in
val outer_reset = ResetCatchAndSync(outer_clock, serdesser.module.reset.asBool)
val phy = Module(new DecoupledSerialPhy(tlChannels, params.phyParams))
phy.io.outer_clock := outer_clock
phy.io.outer_reset := ResetCatchAndSync(outer_clock, serdesser.module.reset.asBool)
phy.io.inner_clock := serdesser.module.clock
phy.io.inner_reset := serdesser.module.reset
phy.io.outer_ser <> io.viewAsSupertype(new DecoupledPhitIO(params.phyParams.phitWidth))
phy.io.inner_ser <> serdesser.module.io.ser
}
case io: SourceSyncPhitIO => {
// 3 clock domains -
// - serdesser's "Inner clock": synchronizes signals going to the digital logic
// - outgoing clock: synchronizes signals going out
// - incoming clock: synchronizes signals coming in
val outgoing_clock = serial_tl_clock_node.get.in.head._1.clock
val outgoing_reset = ResetCatchAndSync(outgoing_clock, serdesser.module.reset.asBool)
val incoming_clock = io.clock_in
val incoming_reset = ResetCatchAndSync(incoming_clock, io.reset_in.asBool)
io.clock_out := outgoing_clock
io.reset_out := outgoing_reset.asAsyncReset
val phy = Module(new CreditedSerialPhy(tlChannels, params.phyParams))
phy.io.incoming_clock := incoming_clock
phy.io.incoming_reset := incoming_reset
phy.io.outgoing_clock := outgoing_clock
phy.io.outgoing_reset := outgoing_reset
phy.io.inner_clock := serdesser.module.clock
phy.io.inner_reset := serdesser.module.reset
phy.io.inner_ser <> serdesser.module.io.ser
phy.io.outer_ser <> io.viewAsSupertype(new ValidPhitIO(params.phyParams.phitWidth))
}
}
inner_io
}}
val outer_io = InModuleBody {
val outer_io = IO(params.phyParams.genIO).suggestName(name)
outer_io <> inner_io
outer_io
}
val inner_debug_io = serial_tl_domain { InModuleBody {
val inner_debug_io = IO(new SerdesDebugIO).suggestName(s"${name}_debug")
inner_debug_io := serdesser.module.io.debug
inner_debug_io
}}
val outer_debug_io = InModuleBody {
val outer_debug_io = IO(new SerdesDebugIO).suggestName(s"${name}_debug")
outer_debug_io := inner_debug_io
outer_debug_io
}
(serdesser, outer_io, outer_debug_io)
}.unzip3
}
| module SerialTL0ClockSinkDomain( // @[ClockDomain.scala:14:9]
input auto_serdesser_client_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_serdesser_client_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_serdesser_client_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_serdesser_client_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_serdesser_client_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_serdesser_client_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_serdesser_client_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_serdesser_client_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_serdesser_client_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_serdesser_client_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_serdesser_client_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_serdesser_client_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_serdesser_client_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_serdesser_client_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_serdesser_client_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_serdesser_client_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_serdesser_client_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_serdesser_client_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_serdesser_client_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_serdesser_client_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_reset, // @[LazyModuleImp.scala:107:25]
output serial_tl_0_in_ready, // @[PeripheryTLSerial.scala:165:24]
input serial_tl_0_in_valid, // @[PeripheryTLSerial.scala:165:24]
input [31:0] serial_tl_0_in_bits_phit, // @[PeripheryTLSerial.scala:165:24]
input serial_tl_0_out_ready, // @[PeripheryTLSerial.scala:165:24]
output serial_tl_0_out_valid, // @[PeripheryTLSerial.scala:165:24]
output [31:0] serial_tl_0_out_bits_phit, // @[PeripheryTLSerial.scala:165:24]
input serial_tl_0_clock_in, // @[PeripheryTLSerial.scala:165:24]
output serial_tl_0_debug_ser_busy, // @[PeripheryTLSerial.scala:226:30]
output serial_tl_0_debug_des_busy // @[PeripheryTLSerial.scala:226:30]
);
wire _phy_io_outer_reset_catcher_io_sync_reset; // @[ResetCatchAndSync.scala:39:28]
wire _phy_io_inner_ser_0_in_valid; // @[PeripheryTLSerial.scala:186:27]
wire [31:0] _phy_io_inner_ser_0_in_bits_flit; // @[PeripheryTLSerial.scala:186:27]
wire _phy_io_inner_ser_1_in_valid; // @[PeripheryTLSerial.scala:186:27]
wire [31:0] _phy_io_inner_ser_1_in_bits_flit; // @[PeripheryTLSerial.scala:186:27]
wire _phy_io_inner_ser_1_out_ready; // @[PeripheryTLSerial.scala:186:27]
wire _phy_io_inner_ser_2_in_valid; // @[PeripheryTLSerial.scala:186:27]
wire [31:0] _phy_io_inner_ser_2_in_bits_flit; // @[PeripheryTLSerial.scala:186:27]
wire _phy_io_inner_ser_3_in_valid; // @[PeripheryTLSerial.scala:186:27]
wire [31:0] _phy_io_inner_ser_3_in_bits_flit; // @[PeripheryTLSerial.scala:186:27]
wire _phy_io_inner_ser_3_out_ready; // @[PeripheryTLSerial.scala:186:27]
wire _phy_io_inner_ser_4_in_valid; // @[PeripheryTLSerial.scala:186:27]
wire [31:0] _phy_io_inner_ser_4_in_bits_flit; // @[PeripheryTLSerial.scala:186:27]
wire _serdesser_io_ser_0_in_ready; // @[PeripheryTLSerial.scala:129:50]
wire _serdesser_io_ser_1_in_ready; // @[PeripheryTLSerial.scala:129:50]
wire _serdesser_io_ser_1_out_valid; // @[PeripheryTLSerial.scala:129:50]
wire [31:0] _serdesser_io_ser_1_out_bits_flit; // @[PeripheryTLSerial.scala:129:50]
wire _serdesser_io_ser_2_in_ready; // @[PeripheryTLSerial.scala:129:50]
wire _serdesser_io_ser_3_in_ready; // @[PeripheryTLSerial.scala:129:50]
wire _serdesser_io_ser_3_out_valid; // @[PeripheryTLSerial.scala:129:50]
wire [31:0] _serdesser_io_ser_3_out_bits_flit; // @[PeripheryTLSerial.scala:129:50]
wire _serdesser_io_ser_4_in_ready; // @[PeripheryTLSerial.scala:129:50]
wire _serdesser_io_debug_ser_busy; // @[PeripheryTLSerial.scala:129:50]
wire _serdesser_io_debug_des_busy; // @[PeripheryTLSerial.scala:129:50]
wire auto_serdesser_client_out_a_ready_0 = auto_serdesser_client_out_a_ready; // @[ClockDomain.scala:14:9]
wire auto_serdesser_client_out_d_valid_0 = auto_serdesser_client_out_d_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_serdesser_client_out_d_bits_opcode_0 = auto_serdesser_client_out_d_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] auto_serdesser_client_out_d_bits_param_0 = auto_serdesser_client_out_d_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] auto_serdesser_client_out_d_bits_size_0 = auto_serdesser_client_out_d_bits_size; // @[ClockDomain.scala:14:9]
wire [3:0] auto_serdesser_client_out_d_bits_source_0 = auto_serdesser_client_out_d_bits_source; // @[ClockDomain.scala:14:9]
wire [2:0] auto_serdesser_client_out_d_bits_sink_0 = auto_serdesser_client_out_d_bits_sink; // @[ClockDomain.scala:14:9]
wire auto_serdesser_client_out_d_bits_denied_0 = auto_serdesser_client_out_d_bits_denied; // @[ClockDomain.scala:14:9]
wire [63:0] auto_serdesser_client_out_d_bits_data_0 = auto_serdesser_client_out_d_bits_data; // @[ClockDomain.scala:14:9]
wire auto_serdesser_client_out_d_bits_corrupt_0 = auto_serdesser_client_out_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_clock_in_clock_0 = auto_clock_in_clock; // @[ClockDomain.scala:14:9]
wire auto_clock_in_reset_0 = auto_clock_in_reset; // @[ClockDomain.scala:14:9]
wire serial_tl_0_in_valid_0 = serial_tl_0_in_valid; // @[ClockDomain.scala:14:9]
wire [31:0] serial_tl_0_in_bits_phit_0 = serial_tl_0_in_bits_phit; // @[ClockDomain.scala:14:9]
wire serial_tl_0_out_ready_0 = serial_tl_0_out_ready; // @[ClockDomain.scala:14:9]
wire serial_tl_0_clock_in_0 = serial_tl_0_clock_in; // @[ClockDomain.scala:14:9]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire _outer_reset_catcher_io_psd_WIRE_test_mode = 1'h0; // @[ResetCatchAndSync.scala:41:63]
wire _outer_reset_catcher_io_psd_WIRE_test_mode_reset = 1'h0; // @[ResetCatchAndSync.scala:41:63]
wire _outer_reset_catcher_io_psd_WIRE_1_test_mode = 1'h0; // @[ResetCatchAndSync.scala:41:50]
wire _outer_reset_catcher_io_psd_WIRE_1_test_mode_reset = 1'h0; // @[ResetCatchAndSync.scala:41:50]
wire _phy_io_outer_reset_catcher_io_psd_WIRE_test_mode = 1'h0; // @[ResetCatchAndSync.scala:41:63]
wire _phy_io_outer_reset_catcher_io_psd_WIRE_test_mode_reset = 1'h0; // @[ResetCatchAndSync.scala:41:63]
wire _phy_io_outer_reset_catcher_io_psd_WIRE_1_test_mode = 1'h0; // @[ResetCatchAndSync.scala:41:50]
wire _phy_io_outer_reset_catcher_io_psd_WIRE_1_test_mode_reset = 1'h0; // @[ResetCatchAndSync.scala:41:50]
wire clockNodeIn_clock = auto_clock_in_clock_0; // @[ClockDomain.scala:14:9]
wire clockNodeIn_reset = auto_clock_in_reset_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_serdesser_client_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_serdesser_client_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_serdesser_client_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_serdesser_client_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_serdesser_client_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
wire [7:0] auto_serdesser_client_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_serdesser_client_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_serdesser_client_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_serdesser_client_out_a_valid_0; // @[ClockDomain.scala:14:9]
wire auto_serdesser_client_out_d_ready_0; // @[ClockDomain.scala:14:9]
wire serial_tl_0_in_ready_0; // @[ClockDomain.scala:14:9]
wire [31:0] serial_tl_0_out_bits_phit_0; // @[ClockDomain.scala:14:9]
wire serial_tl_0_out_valid_0; // @[ClockDomain.scala:14:9]
wire childClock; // @[LazyModuleImp.scala:155:31]
wire childReset; // @[LazyModuleImp.scala:158:31]
wire _outer_reset_T = childReset; // @[PeripheryTLSerial.scala:185:83]
wire _phy_io_outer_reset_T = childReset; // @[PeripheryTLSerial.scala:188:87]
assign childClock = clockNodeIn_clock; // @[MixedNode.scala:551:17]
assign childReset = clockNodeIn_reset; // @[MixedNode.scala:551:17]
TLSerdesser_serial_tl_0 serdesser ( // @[PeripheryTLSerial.scala:129:50]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_client_out_a_ready (auto_serdesser_client_out_a_ready_0), // @[ClockDomain.scala:14:9]
.auto_client_out_a_valid (auto_serdesser_client_out_a_valid_0),
.auto_client_out_a_bits_opcode (auto_serdesser_client_out_a_bits_opcode_0),
.auto_client_out_a_bits_param (auto_serdesser_client_out_a_bits_param_0),
.auto_client_out_a_bits_size (auto_serdesser_client_out_a_bits_size_0),
.auto_client_out_a_bits_source (auto_serdesser_client_out_a_bits_source_0),
.auto_client_out_a_bits_address (auto_serdesser_client_out_a_bits_address_0),
.auto_client_out_a_bits_mask (auto_serdesser_client_out_a_bits_mask_0),
.auto_client_out_a_bits_data (auto_serdesser_client_out_a_bits_data_0),
.auto_client_out_a_bits_corrupt (auto_serdesser_client_out_a_bits_corrupt_0),
.auto_client_out_d_ready (auto_serdesser_client_out_d_ready_0),
.auto_client_out_d_valid (auto_serdesser_client_out_d_valid_0), // @[ClockDomain.scala:14:9]
.auto_client_out_d_bits_opcode (auto_serdesser_client_out_d_bits_opcode_0), // @[ClockDomain.scala:14:9]
.auto_client_out_d_bits_param (auto_serdesser_client_out_d_bits_param_0), // @[ClockDomain.scala:14:9]
.auto_client_out_d_bits_size (auto_serdesser_client_out_d_bits_size_0), // @[ClockDomain.scala:14:9]
.auto_client_out_d_bits_source (auto_serdesser_client_out_d_bits_source_0), // @[ClockDomain.scala:14:9]
.auto_client_out_d_bits_sink (auto_serdesser_client_out_d_bits_sink_0), // @[ClockDomain.scala:14:9]
.auto_client_out_d_bits_denied (auto_serdesser_client_out_d_bits_denied_0), // @[ClockDomain.scala:14:9]
.auto_client_out_d_bits_data (auto_serdesser_client_out_d_bits_data_0), // @[ClockDomain.scala:14:9]
.auto_client_out_d_bits_corrupt (auto_serdesser_client_out_d_bits_corrupt_0), // @[ClockDomain.scala:14:9]
.io_ser_0_in_ready (_serdesser_io_ser_0_in_ready),
.io_ser_0_in_valid (_phy_io_inner_ser_0_in_valid), // @[PeripheryTLSerial.scala:186:27]
.io_ser_0_in_bits_flit (_phy_io_inner_ser_0_in_bits_flit), // @[PeripheryTLSerial.scala:186:27]
.io_ser_1_in_ready (_serdesser_io_ser_1_in_ready),
.io_ser_1_in_valid (_phy_io_inner_ser_1_in_valid), // @[PeripheryTLSerial.scala:186:27]
.io_ser_1_in_bits_flit (_phy_io_inner_ser_1_in_bits_flit), // @[PeripheryTLSerial.scala:186:27]
.io_ser_1_out_ready (_phy_io_inner_ser_1_out_ready), // @[PeripheryTLSerial.scala:186:27]
.io_ser_1_out_valid (_serdesser_io_ser_1_out_valid),
.io_ser_1_out_bits_flit (_serdesser_io_ser_1_out_bits_flit),
.io_ser_2_in_ready (_serdesser_io_ser_2_in_ready),
.io_ser_2_in_valid (_phy_io_inner_ser_2_in_valid), // @[PeripheryTLSerial.scala:186:27]
.io_ser_2_in_bits_flit (_phy_io_inner_ser_2_in_bits_flit), // @[PeripheryTLSerial.scala:186:27]
.io_ser_3_in_ready (_serdesser_io_ser_3_in_ready),
.io_ser_3_in_valid (_phy_io_inner_ser_3_in_valid), // @[PeripheryTLSerial.scala:186:27]
.io_ser_3_in_bits_flit (_phy_io_inner_ser_3_in_bits_flit), // @[PeripheryTLSerial.scala:186:27]
.io_ser_3_out_ready (_phy_io_inner_ser_3_out_ready), // @[PeripheryTLSerial.scala:186:27]
.io_ser_3_out_valid (_serdesser_io_ser_3_out_valid),
.io_ser_3_out_bits_flit (_serdesser_io_ser_3_out_bits_flit),
.io_ser_4_in_ready (_serdesser_io_ser_4_in_ready),
.io_ser_4_in_valid (_phy_io_inner_ser_4_in_valid), // @[PeripheryTLSerial.scala:186:27]
.io_ser_4_in_bits_flit (_phy_io_inner_ser_4_in_bits_flit), // @[PeripheryTLSerial.scala:186:27]
.io_debug_ser_busy (_serdesser_io_debug_ser_busy),
.io_debug_des_busy (_serdesser_io_debug_des_busy)
); // @[PeripheryTLSerial.scala:129:50]
ResetCatchAndSync_d3 outer_reset_catcher ( // @[ResetCatchAndSync.scala:39:28]
.clock (serial_tl_0_clock_in_0), // @[ClockDomain.scala:14:9]
.reset (_outer_reset_T) // @[PeripheryTLSerial.scala:185:83]
); // @[ResetCatchAndSync.scala:39:28]
DecoupledSerialPhy phy ( // @[PeripheryTLSerial.scala:186:27]
.io_outer_clock (serial_tl_0_clock_in_0), // @[ClockDomain.scala:14:9]
.io_outer_reset (_phy_io_outer_reset_catcher_io_sync_reset), // @[ResetCatchAndSync.scala:39:28]
.io_inner_clock (childClock), // @[LazyModuleImp.scala:155:31]
.io_inner_reset (childReset), // @[LazyModuleImp.scala:158:31]
.io_outer_ser_in_ready (serial_tl_0_in_ready_0),
.io_outer_ser_in_valid (serial_tl_0_in_valid_0), // @[ClockDomain.scala:14:9]
.io_outer_ser_in_bits_phit (serial_tl_0_in_bits_phit_0), // @[ClockDomain.scala:14:9]
.io_outer_ser_out_ready (serial_tl_0_out_ready_0), // @[ClockDomain.scala:14:9]
.io_outer_ser_out_valid (serial_tl_0_out_valid_0),
.io_outer_ser_out_bits_phit (serial_tl_0_out_bits_phit_0),
.io_inner_ser_0_in_ready (_serdesser_io_ser_0_in_ready), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_0_in_valid (_phy_io_inner_ser_0_in_valid),
.io_inner_ser_0_in_bits_flit (_phy_io_inner_ser_0_in_bits_flit),
.io_inner_ser_1_in_ready (_serdesser_io_ser_1_in_ready), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_1_in_valid (_phy_io_inner_ser_1_in_valid),
.io_inner_ser_1_in_bits_flit (_phy_io_inner_ser_1_in_bits_flit),
.io_inner_ser_1_out_ready (_phy_io_inner_ser_1_out_ready),
.io_inner_ser_1_out_valid (_serdesser_io_ser_1_out_valid), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_1_out_bits_flit (_serdesser_io_ser_1_out_bits_flit), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_2_in_ready (_serdesser_io_ser_2_in_ready), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_2_in_valid (_phy_io_inner_ser_2_in_valid),
.io_inner_ser_2_in_bits_flit (_phy_io_inner_ser_2_in_bits_flit),
.io_inner_ser_3_in_ready (_serdesser_io_ser_3_in_ready), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_3_in_valid (_phy_io_inner_ser_3_in_valid),
.io_inner_ser_3_in_bits_flit (_phy_io_inner_ser_3_in_bits_flit),
.io_inner_ser_3_out_ready (_phy_io_inner_ser_3_out_ready),
.io_inner_ser_3_out_valid (_serdesser_io_ser_3_out_valid), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_3_out_bits_flit (_serdesser_io_ser_3_out_bits_flit), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_4_in_ready (_serdesser_io_ser_4_in_ready), // @[PeripheryTLSerial.scala:129:50]
.io_inner_ser_4_in_valid (_phy_io_inner_ser_4_in_valid),
.io_inner_ser_4_in_bits_flit (_phy_io_inner_ser_4_in_bits_flit)
); // @[PeripheryTLSerial.scala:186:27]
ResetCatchAndSync_d3_1 phy_io_outer_reset_catcher ( // @[ResetCatchAndSync.scala:39:28]
.clock (serial_tl_0_clock_in_0), // @[ClockDomain.scala:14:9]
.reset (_phy_io_outer_reset_T), // @[PeripheryTLSerial.scala:188:87]
.io_sync_reset (_phy_io_outer_reset_catcher_io_sync_reset)
); // @[ResetCatchAndSync.scala:39:28]
assign auto_serdesser_client_out_a_valid = auto_serdesser_client_out_a_valid_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_a_bits_opcode = auto_serdesser_client_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_a_bits_param = auto_serdesser_client_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_a_bits_size = auto_serdesser_client_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_a_bits_source = auto_serdesser_client_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_a_bits_address = auto_serdesser_client_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_a_bits_mask = auto_serdesser_client_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_a_bits_data = auto_serdesser_client_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_a_bits_corrupt = auto_serdesser_client_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_serdesser_client_out_d_ready = auto_serdesser_client_out_d_ready_0; // @[ClockDomain.scala:14:9]
assign serial_tl_0_in_ready = serial_tl_0_in_ready_0; // @[ClockDomain.scala:14:9]
assign serial_tl_0_out_valid = serial_tl_0_out_valid_0; // @[ClockDomain.scala:14:9]
assign serial_tl_0_out_bits_phit = serial_tl_0_out_bits_phit_0; // @[ClockDomain.scala:14:9]
assign serial_tl_0_debug_ser_busy = _serdesser_io_debug_ser_busy; // @[PeripheryTLSerial.scala:129:50]
assign serial_tl_0_debug_des_busy = _serdesser_io_debug_des_busy; // @[PeripheryTLSerial.scala:129:50]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
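  // Illustrative use (hypothetical states sIdle/sBusy): `state.isOneOf(sIdle, sBusy)`
  // elaborates to the OR-reduction of the individual comparisons,
  // i.e. (state === sIdle) || (state === sBusy).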
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
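  // Examples of the static rotations, read directly off the definitions above:
  //   Seq(a, b, c, d).rotate(1)      == Seq(b, c, d, a)   // drop(1) ++ take(1)
  //   Seq(a, b, c, d).rotateRight(1) == Seq(d, a, b, c)   // takeRight(1) ++ dropRight(1)
  // The UInt-amount variants require a power-of-two size and build a log2-depth Mux network.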
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
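  // Worked examples for two of the helpers above:
  //   "b101".U(3.W).sextTo(6)  // => "b111101".U, the sign bit replicated into the new MSBs
  //   6.U(3.W).addWrap(3.U, 8) // => 1.U, since (6 + 3) % 8 == 1 and 8 is a power of two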
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
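  // OH1 ("one-hot minus one") encodes a value k as k low-order ones, e.g.
  //   UIntToOH1(2.U, 4)     // => two low-order ones (b0011)
  //   OH1ToOH("b0011".U)    // => the single bit at position 2 (b0100)
  //   OH1ToUInt("b0011".U)  // => 2.U
  // PMP below uses UIntToOH1(lgSize, lgMaxSize) as the (2^lgSize - 1) mask over the low address bits.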
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
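  // Example with a 5-bit value (set bits propagate toward the named end):
  //   leftOR("b00100".U(5.W))  // => "b11100".U, fills toward the MSB
  //   rightOR("b00100".U(5.W)) // => "b00111".U, fills toward the LSB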
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
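  // Example: grouping by parity preserves first-encounter key order and element order,
  // unlike Seq.groupBy, whose Map ordering is unspecified:
  //   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) // => Seq((1, Seq(1, 3)), (0, Seq(2, 4)))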
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File PMP.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat, log2Ceil}
import org.chipsalliance.cde.config._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
class PMPConfig extends Bundle {
val l = Bool()
val res = UInt(2.W)
val a = UInt(2.W)
val x = Bool()
val w = Bool()
val r = Bool()
}
object PMP {
def lgAlign = 2
def apply(reg: PMPReg): PMP = {
val pmp = Wire(new PMP()(reg.p))
pmp.cfg := reg.cfg
pmp.addr := reg.addr
pmp.mask := pmp.computeMask
pmp
}
}
class PMPReg(implicit p: Parameters) extends CoreBundle()(p) {
val cfg = new PMPConfig
val addr = UInt((paddrBits - PMP.lgAlign).W)
def reset(): Unit = {
cfg.a := 0.U
cfg.l := 0.U
}
def readAddr = if (pmpGranularity.log2 == PMP.lgAlign) addr else {
val mask = ((BigInt(1) << (pmpGranularity.log2 - PMP.lgAlign)) - 1).U
Mux(napot, addr | (mask >> 1), ~(~addr | mask))
}
def napot = cfg.a(1)
def torNotNAPOT = cfg.a(0)
def tor = !napot && torNotNAPOT
def cfgLocked = cfg.l
def addrLocked(next: PMPReg) = cfgLocked || next.cfgLocked && next.tor
}
class PMP(implicit p: Parameters) extends PMPReg {
val mask = UInt(paddrBits.W)
import PMP._
def computeMask = {
val base = Cat(addr, cfg.a(0)) | ((pmpGranularity - 1).U >> lgAlign)
Cat(base & ~(base + 1.U), ((1 << lgAlign) - 1).U)
}
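  // Worked example (assuming pmpGranularity == 4, so the granularity term is zero):
  // a NAPOT pmpaddr ending in k trailing ones encodes a 2^(k+3)-byte region. For
  // addr = ...0111 (k = 3) with cfg.a(0) = 1, base ends in four ones, base & ~(base + 1)
  // keeps exactly those ones, and appending the two lgAlign ones yields six low mask
  // bits, i.e. a 64-byte naturally aligned region.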
private def comparand = ~(~(addr << lgAlign) | (pmpGranularity - 1).U)
private def pow2Match(x: UInt, lgSize: UInt, lgMaxSize: Int) = {
def eval(a: UInt, b: UInt, m: UInt) = ((a ^ b) & ~m) === 0.U
if (lgMaxSize <= pmpGranularity.log2) {
eval(x, comparand, mask)
} else {
// break up the circuit; the MSB part will be CSE'd
val lsbMask = mask | UIntToOH1(lgSize, lgMaxSize)
val msbMatch = eval(x >> lgMaxSize, comparand >> lgMaxSize, mask >> lgMaxSize)
val lsbMatch = eval(x(lgMaxSize-1, 0), comparand(lgMaxSize-1, 0), lsbMask(lgMaxSize-1, 0))
msbMatch && lsbMatch
}
}
private def boundMatch(x: UInt, lsbMask: UInt, lgMaxSize: Int) = {
if (lgMaxSize <= pmpGranularity.log2) {
x < comparand
} else {
// break up the circuit; the MSB part will be CSE'd
val msbsLess = (x >> lgMaxSize) < (comparand >> lgMaxSize)
val msbsEqual = ((x >> lgMaxSize) ^ (comparand >> lgMaxSize)) === 0.U
val lsbsLess = (x(lgMaxSize-1, 0) | lsbMask) < comparand(lgMaxSize-1, 0)
msbsLess || (msbsEqual && lsbsLess)
}
}
private def lowerBoundMatch(x: UInt, lgSize: UInt, lgMaxSize: Int) =
!boundMatch(x, UIntToOH1(lgSize, lgMaxSize), lgMaxSize)
private def upperBoundMatch(x: UInt, lgMaxSize: Int) =
boundMatch(x, 0.U, lgMaxSize)
private def rangeMatch(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP) =
prev.lowerBoundMatch(x, lgSize, lgMaxSize) && upperBoundMatch(x, lgMaxSize)
private def pow2Homogeneous(x: UInt, pgLevel: UInt) = {
val maskHomogeneous = pgLevelMap { idxBits => if (idxBits > paddrBits) false.B else mask(idxBits - 1) } (pgLevel)
maskHomogeneous || (pgLevelMap { idxBits => ((x ^ comparand) >> idxBits) =/= 0.U } (pgLevel))
}
private def pgLevelMap[T](f: Int => T) = (0 until pgLevels).map { i =>
f(pgIdxBits + (pgLevels - 1 - i) * pgLevelBits)
}
private def rangeHomogeneous(x: UInt, pgLevel: UInt, prev: PMP) = {
val beginsAfterLower = !(x < prev.comparand)
val beginsAfterUpper = !(x < comparand)
val pgMask = pgLevelMap { idxBits => (((BigInt(1) << paddrBits) - (BigInt(1) << idxBits)) max 0).U } (pgLevel)
val endsBeforeLower = (x & pgMask) < (prev.comparand & pgMask)
val endsBeforeUpper = (x & pgMask) < (comparand & pgMask)
endsBeforeLower || beginsAfterUpper || (beginsAfterLower && endsBeforeUpper)
}
// returns whether this PMP completely contains, or contains none of, a page
def homogeneous(x: UInt, pgLevel: UInt, prev: PMP): Bool =
Mux(napot, pow2Homogeneous(x, pgLevel), !torNotNAPOT || rangeHomogeneous(x, pgLevel, prev))
// returns whether this matching PMP fully contains the access
def aligned(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP): Bool = if (lgMaxSize <= pmpGranularity.log2) true.B else {
val lsbMask = UIntToOH1(lgSize, lgMaxSize)
val straddlesLowerBound = ((x >> lgMaxSize) ^ (prev.comparand >> lgMaxSize)) === 0.U && (prev.comparand(lgMaxSize-1, 0) & ~x(lgMaxSize-1, 0)) =/= 0.U
val straddlesUpperBound = ((x >> lgMaxSize) ^ (comparand >> lgMaxSize)) === 0.U && (comparand(lgMaxSize-1, 0) & (x(lgMaxSize-1, 0) | lsbMask)) =/= 0.U
val rangeAligned = !(straddlesLowerBound || straddlesUpperBound)
val pow2Aligned = (lsbMask & ~mask(lgMaxSize-1, 0)) === 0.U
Mux(napot, pow2Aligned, rangeAligned)
}
// returns whether this PMP matches at least one byte of the access
def hit(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP): Bool =
Mux(napot, pow2Match(x, lgSize, lgMaxSize), torNotNAPOT && rangeMatch(x, lgSize, lgMaxSize, prev))
}
class PMPHomogeneityChecker(pmps: Seq[PMP])(implicit p: Parameters) {
def apply(addr: UInt, pgLevel: UInt): Bool = {
pmps.foldLeft((true.B, 0.U.asTypeOf(new PMP))) { case ((h, prev), pmp) =>
(h && pmp.homogeneous(addr, pgLevel, prev), pmp)
}._1
}
}
class PMPChecker(lgMaxSize: Int)(implicit val p: Parameters) extends Module
with HasCoreParameters {
override def desiredName = s"PMPChecker_s${lgMaxSize}"
val io = IO(new Bundle {
val prv = Input(UInt(PRV.SZ.W))
val pmp = Input(Vec(nPMPs, new PMP))
val addr = Input(UInt(paddrBits.W))
val size = Input(UInt(log2Ceil(lgMaxSize + 1).W))
val r = Output(Bool())
val w = Output(Bool())
val x = Output(Bool())
})
val default = if (io.pmp.isEmpty) true.B else io.prv > PRV.S.U
val pmp0 = WireInit(0.U.asTypeOf(new PMP))
pmp0.cfg.r := default
pmp0.cfg.w := default
pmp0.cfg.x := default
val res = (io.pmp zip (pmp0 +: io.pmp)).reverse.foldLeft(pmp0) { case (prev, (pmp, prevPMP)) =>
val hit = pmp.hit(io.addr, io.size, lgMaxSize, prevPMP)
val ignore = default && !pmp.cfg.l
val aligned = pmp.aligned(io.addr, io.size, lgMaxSize, prevPMP)
for ((name, idx) <- Seq("no", "TOR", if (pmpGranularity <= 4) "NA4" else "", "NAPOT").zipWithIndex; if name.nonEmpty)
property.cover(pmp.cfg.a === idx.U, s"The cfg access is set to ${name} access ", "Cover PMP access mode setting")
property.cover(pmp.cfg.l === 0x1.U, s"The cfg lock is set to high ", "Cover PMP lock mode setting")
      // Write-without-Read permission combinations are omitted, as that encoding is reserved
for ((name, idx) <- Seq("no", "RO", "", "RW", "X", "RX", "", "RWX").zipWithIndex; if name.nonEmpty)
property.cover((Cat(pmp.cfg.x, pmp.cfg.w, pmp.cfg.r) === idx.U), s"The permission is set to ${name} access ", "Cover PMP access permission setting")
for ((name, idx) <- Seq("", "TOR", if (pmpGranularity <= 4) "NA4" else "", "NAPOT").zipWithIndex; if name.nonEmpty) {
property.cover(!ignore && hit && aligned && pmp.cfg.a === idx.U, s"The access matches ${name} mode ", "Cover PMP access")
property.cover(pmp.cfg.l && hit && aligned && pmp.cfg.a === idx.U, s"The access matches ${name} mode with lock bit high", "Cover PMP access with lock bit")
}
val cur = WireInit(pmp)
cur.cfg.r := aligned && (pmp.cfg.r || ignore)
cur.cfg.w := aligned && (pmp.cfg.w || ignore)
cur.cfg.x := aligned && (pmp.cfg.x || ignore)
Mux(hit, cur, prev)
}
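  // Because the fold walks the (pmp, prev) pairs in reverse, the lowest-numbered matching
  // entry is evaluated last and therefore wins, matching the RISC-V rule that PMP entries
  // are statically prioritized by index; `ignore` lets M-mode accesses bypass the
  // permission bits of unlocked entries.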
io.r := res.cfg.r
io.w := res.cfg.w
io.x := res.cfg.x
}
| module PMPChecker_s3_7( // @[PMP.scala:143:7]
input clock, // @[PMP.scala:143:7]
input reset, // @[PMP.scala:143:7]
input [31:0] io_addr, // @[PMP.scala:146:14]
input [1:0] io_size // @[PMP.scala:146:14]
);
wire [31:0] io_addr_0 = io_addr; // @[PMP.scala:143:7]
wire [1:0] io_size_0 = io_size; // @[PMP.scala:143:7]
wire [28:0] _res_hit_msbMatch_T_8 = 29'h1FFFFFFF; // @[PMP.scala:63:54]
wire [28:0] _res_hit_msbMatch_T_18 = 29'h1FFFFFFF; // @[PMP.scala:63:54]
wire [28:0] _res_hit_msbMatch_T_28 = 29'h1FFFFFFF; // @[PMP.scala:63:54]
wire [28:0] _res_hit_msbMatch_T_38 = 29'h1FFFFFFF; // @[PMP.scala:63:54]
wire [28:0] _res_hit_msbMatch_T_48 = 29'h1FFFFFFF; // @[PMP.scala:63:54]
wire [28:0] _res_hit_msbMatch_T_58 = 29'h1FFFFFFF; // @[PMP.scala:63:54]
wire [28:0] _res_hit_msbMatch_T_68 = 29'h1FFFFFFF; // @[PMP.scala:63:54]
wire [28:0] _res_hit_msbMatch_T_78 = 29'h1FFFFFFF; // @[PMP.scala:63:54]
wire [28:0] _res_hit_msbMatch_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_6 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_11 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_12 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesLowerBound_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesUpperBound_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_15 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_16 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_17 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_19 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_23 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_26 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesLowerBound_T_22 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesUpperBound_T_22 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_25 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_26 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_29 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_33 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_35 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_40 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesLowerBound_T_39 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesUpperBound_T_39 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_35 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_36 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_41 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_47 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_47 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_54 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesLowerBound_T_56 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesUpperBound_T_56 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_45 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_46 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_53 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_61 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_59 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_68 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesLowerBound_T_73 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesUpperBound_T_73 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_55 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_56 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_65 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_75 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_71 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_82 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesLowerBound_T_90 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesUpperBound_T_90 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_65 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_66 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_77 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_89 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_83 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_96 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesLowerBound_T_107 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesUpperBound_T_107 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_75 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbMatch_T_76 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_89 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_103 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsLess_T_95 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_hit_msbsEqual_T_110 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesLowerBound_T_124 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [28:0] _res_aligned_straddlesUpperBound_T_124 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [31:0] _res_hit_msbMatch_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_4 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_8 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_9 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_9 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_10 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_10 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_11 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_9 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_10 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_9 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_10 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_12 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_13 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_12 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_13 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_14 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_15 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_16 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_17 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_17 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_18 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_20 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_21 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_23 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_24 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_24 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_25 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_19 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_20 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_26 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_27 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_19 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_20 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_26 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_27 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_22 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_23 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_22 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_23 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_26 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_27 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_30 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_31 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_31 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_32 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_32 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_33 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_37 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_38 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_38 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_39 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_36 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_37 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_43 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_44 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_36 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_37 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_43 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_44 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_32 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_33 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_32 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_33 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_38 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_39 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_44 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_45 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_45 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_46 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_44 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_45 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_51 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_52 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_52 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_54 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_60 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_61 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_54 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_60 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_61 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_42 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_43 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_42 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_43 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_50 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_51 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_58 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_59 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_59 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_60 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_56 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_57 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_65 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_66 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_66 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_67 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_70 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_71 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_77 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_78 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_70 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_71 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_77 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_78 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_52 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_52 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_62 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_63 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_72 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_73 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_73 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_74 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_68 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_69 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_79 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_80 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_80 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_81 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_88 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_94 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_95 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_88 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_94 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_95 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_62 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_63 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_62 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_63 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_74 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_75 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_86 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_88 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_80 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_81 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_93 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_94 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_94 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_95 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_104 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_105 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_111 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_112 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_104 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_105 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_111 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_112 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_72 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbMatch_T_73 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_72 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbMatch_T_73 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_86 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_100 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_101 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_101 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_102 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_92 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsLess_T_93 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_107 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_msbsEqual_T_108 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_108 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_hit_lsbsLess_T_109 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_121 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_122 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_128 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesLowerBound_T_129 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_121 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_122 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_128 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [31:0] _res_aligned_straddlesUpperBound_T_129 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}]
wire [2:0] _res_aligned_pow2Aligned_T_1 = 3'h7; // @[PMP.scala:126:34]
wire [2:0] _res_aligned_pow2Aligned_T_4 = 3'h7; // @[PMP.scala:126:34]
wire [2:0] _res_aligned_pow2Aligned_T_7 = 3'h7; // @[PMP.scala:126:34]
wire [2:0] _res_aligned_pow2Aligned_T_10 = 3'h7; // @[PMP.scala:126:34]
wire [2:0] _res_aligned_pow2Aligned_T_13 = 3'h7; // @[PMP.scala:126:34]
wire [2:0] _res_aligned_pow2Aligned_T_16 = 3'h7; // @[PMP.scala:126:34]
wire [2:0] _res_aligned_pow2Aligned_T_19 = 3'h7; // @[PMP.scala:126:34]
wire [2:0] _res_aligned_pow2Aligned_T_22 = 3'h7; // @[PMP.scala:126:34]
wire [2:0] _res_hit_lsbMatch_T_5 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_6 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_13 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_12 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_12 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_pow2Aligned_T = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_5 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_7 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_9 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_11 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_13 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbMatch_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_20 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_27 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_29 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_32 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_29 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_32 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_pow2Aligned_T_3 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_50 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_52 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_54 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_56 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_58 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_60 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbMatch_T_25 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_34 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_41 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_46 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_49 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_46 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_49 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_pow2Aligned_T_6 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_95 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_97 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_99 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_101 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_103 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_105 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbMatch_T_35 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_48 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_55 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_63 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_66 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_63 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_66 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_pow2Aligned_T_9 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_140 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_142 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_144 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_146 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_148 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_150 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbMatch_T_45 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_62 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_69 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_80 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_83 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_80 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_83 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_pow2Aligned_T_12 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_185 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_187 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_189 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_191 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_193 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_195 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbMatch_T_55 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_76 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_83 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_97 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_100 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_97 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_100 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_pow2Aligned_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_230 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_232 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_234 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_236 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_238 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_240 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbMatch_T_65 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_90 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_97 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_114 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_117 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_114 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_117 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_pow2Aligned_T_18 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_275 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_277 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_279 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_281 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_283 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_285 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbMatch_T_75 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_104 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_hit_lsbsLess_T_111 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_131 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesLowerBound_T_134 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_131 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_straddlesUpperBound_T_134 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_aligned_pow2Aligned_T_21 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_320 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_322 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_324 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_326 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_328 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire [2:0] _res_T_330 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
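  // The single-bit wires below are constant 1'b1; they correspond to PMP hit/ignore/
  // aligned terms that appear to have been constant-folded (the PMP cfg/addr/mask
  // values in this module are all constant zero, as declared further down).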
wire _res_hit_T_8 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_ignore_T = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_rangeAligned = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_6 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_17 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_26 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_35 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_hit_T_21 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_ignore_T_1 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_rangeAligned_1 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_1 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_45 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_51 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_62 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_71 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_80 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_hit_T_34 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_ignore_T_2 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_rangeAligned_2 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_2 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_90 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_96 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_107 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_116 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_125 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_hit_T_47 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_ignore_T_3 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_rangeAligned_3 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_3 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_135 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_141 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_152 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_161 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_170 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_hit_T_60 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_ignore_T_4 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_rangeAligned_4 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_4 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_180 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_186 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_197 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_206 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_215 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_hit_T_73 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_ignore_T_5 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_rangeAligned_5 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_5 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_225 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_231 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_242 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_251 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_260 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_hit_T_86 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_ignore_T_6 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_rangeAligned_6 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_6 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_270 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_276 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_287 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_296 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_305 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_hit_T_99 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_ignore_T_7 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_rangeAligned_7 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire res_aligned_7 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_315 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_321 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_332 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_341 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
wire _res_T_350 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22]
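  // 32-bit PMP mask fields and address-comparison temporaries, all constant 32'h0.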
wire [31:0] io_pmp_0_mask = 32'h0; // @[PMP.scala:143:7]
wire [31:0] io_pmp_1_mask = 32'h0; // @[PMP.scala:143:7]
wire [31:0] io_pmp_2_mask = 32'h0; // @[PMP.scala:143:7]
wire [31:0] io_pmp_3_mask = 32'h0; // @[PMP.scala:143:7]
wire [31:0] io_pmp_4_mask = 32'h0; // @[PMP.scala:143:7]
wire [31:0] io_pmp_5_mask = 32'h0; // @[PMP.scala:143:7]
wire [31:0] io_pmp_6_mask = 32'h0; // @[PMP.scala:143:7]
wire [31:0] io_pmp_7_mask = 32'h0; // @[PMP.scala:143:7]
wire [31:0] _pmp0_WIRE_mask = 32'h0; // @[PMP.scala:157:35]
wire [31:0] pmp0_mask = 32'h0; // @[PMP.scala:157:22]
wire [31:0] _res_hit_msbMatch_T_1 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbMatch_T_4 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbMatch_T_1 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbMatch_T_4 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_1 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_4 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_1 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_4 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_2 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_5 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_7 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_10 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_8 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_11 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_9 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_12 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_1 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_4 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_8 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_11 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_1 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_4 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_8 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_11 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] res_cur_mask = 32'h0; // @[PMP.scala:181:23]
wire [31:0] _res_T_44_mask = 32'h0; // @[PMP.scala:185:8]
wire [31:0] _res_hit_msbMatch_T_11 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbMatch_T_14 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbMatch_T_11 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbMatch_T_14 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_13 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_16 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_15 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_18 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_16 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_19 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_19 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_22 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_22 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_25 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_23 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_26 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_18 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_21 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_25 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_28 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_18 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_21 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_25 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_28 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] res_cur_1_mask = 32'h0; // @[PMP.scala:181:23]
wire [31:0] _res_T_89_mask = 32'h0; // @[PMP.scala:185:8]
wire [31:0] _res_hit_msbMatch_T_21 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbMatch_T_24 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbMatch_T_21 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbMatch_T_24 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_25 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_28 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_29 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_32 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_30 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_33 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_31 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_34 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_36 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_39 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_37 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_40 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_35 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_38 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_42 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_45 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_35 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_38 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_42 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_45 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] res_cur_2_mask = 32'h0; // @[PMP.scala:181:23]
wire [31:0] _res_T_134_mask = 32'h0; // @[PMP.scala:185:8]
wire [31:0] _res_hit_msbMatch_T_31 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbMatch_T_34 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbMatch_T_31 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbMatch_T_34 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_37 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_40 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_43 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_46 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_44 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_47 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_43 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_46 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_50 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_53 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_51 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_54 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_52 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_55 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_59 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_62 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_52 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_55 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_59 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_62 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] res_cur_3_mask = 32'h0; // @[PMP.scala:181:23]
wire [31:0] _res_T_179_mask = 32'h0; // @[PMP.scala:185:8]
wire [31:0] _res_hit_msbMatch_T_41 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbMatch_T_44 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbMatch_T_41 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbMatch_T_44 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_49 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_52 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_57 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_60 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_58 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_61 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_55 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_58 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_64 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_67 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_65 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_68 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_69 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_72 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_76 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_79 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_69 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_72 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_76 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_79 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] res_cur_4_mask = 32'h0; // @[PMP.scala:181:23]
wire [31:0] _res_T_224_mask = 32'h0; // @[PMP.scala:185:8]
wire [31:0] _res_hit_msbMatch_T_51 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbMatch_T_54 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbMatch_T_51 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbMatch_T_54 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_61 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_64 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_71 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_74 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_72 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_75 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_67 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_70 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_78 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_81 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_79 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_82 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_86 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_89 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_93 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_96 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_86 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_89 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_93 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_96 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] res_cur_5_mask = 32'h0; // @[PMP.scala:181:23]
wire [31:0] _res_T_269_mask = 32'h0; // @[PMP.scala:185:8]
wire [31:0] _res_hit_msbMatch_T_61 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbMatch_T_64 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbMatch_T_61 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbMatch_T_64 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_73 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_76 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_85 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_88 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_86 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_89 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_79 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_82 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_92 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_95 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_93 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_96 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_103 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_106 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_110 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_113 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_103 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_106 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_110 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_113 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] res_cur_6_mask = 32'h0; // @[PMP.scala:181:23]
wire [31:0] _res_T_314_mask = 32'h0; // @[PMP.scala:185:8]
wire [31:0] _res_hit_msbMatch_T_71 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbMatch_T_74 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbMatch_T_71 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbMatch_T_74 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_85 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_88 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_99 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_102 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_100 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_103 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsLess_T_91 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsLess_T_94 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_msbsEqual_T_106 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_msbsEqual_T_109 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_hit_lsbsLess_T_107 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_hit_lsbsLess_T_110 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_120 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_123 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesLowerBound_T_127 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesLowerBound_T_130 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_120 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_123 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _res_aligned_straddlesUpperBound_T_127 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _res_aligned_straddlesUpperBound_T_130 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] res_cur_7_mask = 32'h0; // @[PMP.scala:181:23]
wire [31:0] res_mask = 32'h0; // @[PMP.scala:185:8]
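  // 30-bit PMP address fields, all constant 30'h0.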
wire [29:0] io_pmp_0_addr = 30'h0; // @[PMP.scala:143:7]
wire [29:0] io_pmp_1_addr = 30'h0; // @[PMP.scala:143:7]
wire [29:0] io_pmp_2_addr = 30'h0; // @[PMP.scala:143:7]
wire [29:0] io_pmp_3_addr = 30'h0; // @[PMP.scala:143:7]
wire [29:0] io_pmp_4_addr = 30'h0; // @[PMP.scala:143:7]
wire [29:0] io_pmp_5_addr = 30'h0; // @[PMP.scala:143:7]
wire [29:0] io_pmp_6_addr = 30'h0; // @[PMP.scala:143:7]
wire [29:0] io_pmp_7_addr = 30'h0; // @[PMP.scala:143:7]
wire [29:0] _pmp0_WIRE_addr = 30'h0; // @[PMP.scala:157:35]
wire [29:0] pmp0_addr = 30'h0; // @[PMP.scala:157:22]
wire [29:0] res_cur_addr = 30'h0; // @[PMP.scala:181:23]
wire [29:0] _res_T_44_addr = 30'h0; // @[PMP.scala:185:8]
wire [29:0] res_cur_1_addr = 30'h0; // @[PMP.scala:181:23]
wire [29:0] _res_T_89_addr = 30'h0; // @[PMP.scala:185:8]
wire [29:0] res_cur_2_addr = 30'h0; // @[PMP.scala:181:23]
wire [29:0] _res_T_134_addr = 30'h0; // @[PMP.scala:185:8]
wire [29:0] res_cur_3_addr = 30'h0; // @[PMP.scala:181:23]
wire [29:0] _res_T_179_addr = 30'h0; // @[PMP.scala:185:8]
wire [29:0] res_cur_4_addr = 30'h0; // @[PMP.scala:181:23]
wire [29:0] _res_T_224_addr = 30'h0; // @[PMP.scala:185:8]
wire [29:0] res_cur_5_addr = 30'h0; // @[PMP.scala:181:23]
wire [29:0] _res_T_269_addr = 30'h0; // @[PMP.scala:185:8]
wire [29:0] res_cur_6_addr = 30'h0; // @[PMP.scala:181:23]
wire [29:0] _res_T_314_addr = 30'h0; // @[PMP.scala:185:8]
wire [29:0] res_cur_7_addr = 30'h0; // @[PMP.scala:181:23]
wire [29:0] res_addr = 30'h0; // @[PMP.scala:185:8]
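  // 2-bit cfg.res / cfg.a fields and res_hi intermediates, all constant 2'h0.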
wire [1:0] io_pmp_0_cfg_res = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_0_cfg_a = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_1_cfg_res = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_1_cfg_a = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_2_cfg_res = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_2_cfg_a = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_3_cfg_res = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_3_cfg_a = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_4_cfg_res = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_4_cfg_a = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_5_cfg_res = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_5_cfg_a = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_6_cfg_res = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_6_cfg_a = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_7_cfg_res = 2'h0; // @[PMP.scala:143:7]
wire [1:0] io_pmp_7_cfg_a = 2'h0; // @[PMP.scala:143:7]
wire [1:0] _pmp0_WIRE_cfg_res = 2'h0; // @[PMP.scala:157:35]
wire [1:0] _pmp0_WIRE_cfg_a = 2'h0; // @[PMP.scala:157:35]
wire [1:0] pmp0_cfg_res = 2'h0; // @[PMP.scala:157:22]
wire [1:0] pmp0_cfg_a = 2'h0; // @[PMP.scala:157:22]
wire [1:0] res_hi = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_1 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_2 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_3 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_4 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_5 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_cur_cfg_res = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cur_cfg_a = 2'h0; // @[PMP.scala:181:23]
wire [1:0] _res_T_44_cfg_res = 2'h0; // @[PMP.scala:185:8]
wire [1:0] _res_T_44_cfg_a = 2'h0; // @[PMP.scala:185:8]
wire [1:0] res_hi_6 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_7 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_8 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_9 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_10 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_11 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_cur_1_cfg_res = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cur_1_cfg_a = 2'h0; // @[PMP.scala:181:23]
wire [1:0] _res_T_89_cfg_res = 2'h0; // @[PMP.scala:185:8]
wire [1:0] _res_T_89_cfg_a = 2'h0; // @[PMP.scala:185:8]
wire [1:0] res_hi_12 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_13 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_14 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_15 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_16 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_17 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_cur_2_cfg_res = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cur_2_cfg_a = 2'h0; // @[PMP.scala:181:23]
wire [1:0] _res_T_134_cfg_res = 2'h0; // @[PMP.scala:185:8]
wire [1:0] _res_T_134_cfg_a = 2'h0; // @[PMP.scala:185:8]
wire [1:0] res_hi_18 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_19 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_20 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_21 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_22 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_23 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_cur_3_cfg_res = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cur_3_cfg_a = 2'h0; // @[PMP.scala:181:23]
wire [1:0] _res_T_179_cfg_res = 2'h0; // @[PMP.scala:185:8]
wire [1:0] _res_T_179_cfg_a = 2'h0; // @[PMP.scala:185:8]
wire [1:0] res_hi_24 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_25 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_26 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_27 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_28 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_29 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_cur_4_cfg_res = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cur_4_cfg_a = 2'h0; // @[PMP.scala:181:23]
wire [1:0] _res_T_224_cfg_res = 2'h0; // @[PMP.scala:185:8]
wire [1:0] _res_T_224_cfg_a = 2'h0; // @[PMP.scala:185:8]
wire [1:0] res_hi_30 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_31 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_32 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_33 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_34 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_35 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_cur_5_cfg_res = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cur_5_cfg_a = 2'h0; // @[PMP.scala:181:23]
wire [1:0] _res_T_269_cfg_res = 2'h0; // @[PMP.scala:185:8]
wire [1:0] _res_T_269_cfg_a = 2'h0; // @[PMP.scala:185:8]
wire [1:0] res_hi_36 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_37 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_38 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_39 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_40 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_41 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_cur_6_cfg_res = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cur_6_cfg_a = 2'h0; // @[PMP.scala:181:23]
wire [1:0] _res_T_314_cfg_res = 2'h0; // @[PMP.scala:185:8]
wire [1:0] _res_T_314_cfg_a = 2'h0; // @[PMP.scala:185:8]
wire [1:0] res_hi_42 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_43 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_44 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_45 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_46 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_hi_47 = 2'h0; // @[PMP.scala:174:26]
wire [1:0] res_cur_7_cfg_res = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cur_7_cfg_a = 2'h0; // @[PMP.scala:181:23]
wire [1:0] res_cfg_res = 2'h0; // @[PMP.scala:185:8]
wire [1:0] res_cfg_a = 2'h0; // @[PMP.scala:185:8]
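  // Single-bit cfg flags (l/x/w/r) and the remaining _res_* temporaries, all constant 1'b0.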
wire io_pmp_0_cfg_l = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_0_cfg_x = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_0_cfg_w = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_0_cfg_r = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_1_cfg_l = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_1_cfg_x = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_1_cfg_w = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_1_cfg_r = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_2_cfg_l = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_2_cfg_x = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_2_cfg_w = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_2_cfg_r = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_3_cfg_l = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_3_cfg_x = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_3_cfg_w = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_3_cfg_r = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_4_cfg_l = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_4_cfg_x = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_4_cfg_w = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_4_cfg_r = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_5_cfg_l = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_5_cfg_x = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_5_cfg_w = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_5_cfg_r = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_6_cfg_l = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_6_cfg_x = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_6_cfg_w = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_6_cfg_r = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_7_cfg_l = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_7_cfg_x = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_7_cfg_w = 1'h0; // @[PMP.scala:143:7]
wire io_pmp_7_cfg_r = 1'h0; // @[PMP.scala:143:7]
wire io_r = 1'h0; // @[PMP.scala:143:7]
wire io_w = 1'h0; // @[PMP.scala:143:7]
wire io_x = 1'h0; // @[PMP.scala:143:7]
wire default_0 = 1'h0; // @[PMP.scala:156:56]
wire _pmp0_WIRE_cfg_l = 1'h0; // @[PMP.scala:157:35]
wire _pmp0_WIRE_cfg_x = 1'h0; // @[PMP.scala:157:35]
wire _pmp0_WIRE_cfg_w = 1'h0; // @[PMP.scala:157:35]
wire _pmp0_WIRE_cfg_r = 1'h0; // @[PMP.scala:157:35]
wire pmp0_cfg_l = 1'h0; // @[PMP.scala:157:22]
wire pmp0_cfg_x = 1'h0; // @[PMP.scala:157:22]
wire pmp0_cfg_w = 1'h0; // @[PMP.scala:157:22]
wire pmp0_cfg_r = 1'h0; // @[PMP.scala:157:22]
wire _res_hit_T = 1'h0; // @[PMP.scala:45:20]
wire _res_hit_T_2 = 1'h0; // @[PMP.scala:46:26]
wire res_hit_msbsLess = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_6 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_7 = 1'h0; // @[PMP.scala:83:16]
wire res_hit_msbsLess_1 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_1 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_9 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_10 = 1'h0; // @[PMP.scala:83:16]
wire _res_hit_T_11 = 1'h0; // @[PMP.scala:94:48]
wire _res_hit_T_12 = 1'h0; // @[PMP.scala:132:61]
wire res_hit = 1'h0; // @[PMP.scala:132:8]
wire res_ignore = 1'h0; // @[PMP.scala:164:26]
wire _res_aligned_straddlesLowerBound_T_16 = 1'h0; // @[PMP.scala:123:147]
wire res_aligned_straddlesLowerBound = 1'h0; // @[PMP.scala:123:90]
wire _res_aligned_straddlesUpperBound_T_16 = 1'h0; // @[PMP.scala:124:148]
wire res_aligned_straddlesUpperBound = 1'h0; // @[PMP.scala:124:85]
wire _res_aligned_rangeAligned_T = 1'h0; // @[PMP.scala:125:46]
wire _res_aligned_T = 1'h0; // @[PMP.scala:45:20]
wire _res_T_1 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_2 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_3 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_4 = 1'h0; // @[PMP.scala:170:30]
wire _res_T_8 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_10 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_12 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_14 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_16 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_18 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_19 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_20 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_21 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_22 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_23 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_24 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_25 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_27 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_28 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_29 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_30 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_31 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_32 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_33 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_34 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_36 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_37 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_38 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_39 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_40 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_41 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_42 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_43 = 1'h0; // @[PMP.scala:178:50]
wire res_cur_cfg_l = 1'h0; // @[PMP.scala:181:23]
wire res_cur_cfg_x = 1'h0; // @[PMP.scala:181:23]
wire res_cur_cfg_w = 1'h0; // @[PMP.scala:181:23]
wire res_cur_cfg_r = 1'h0; // @[PMP.scala:181:23]
wire _res_cur_cfg_r_T = 1'h0; // @[PMP.scala:182:40]
wire _res_cur_cfg_r_T_1 = 1'h0; // @[PMP.scala:182:26]
wire _res_cur_cfg_w_T = 1'h0; // @[PMP.scala:183:40]
wire _res_cur_cfg_w_T_1 = 1'h0; // @[PMP.scala:183:26]
wire _res_cur_cfg_x_T = 1'h0; // @[PMP.scala:184:40]
wire _res_cur_cfg_x_T_1 = 1'h0; // @[PMP.scala:184:26]
wire _res_T_44_cfg_l = 1'h0; // @[PMP.scala:185:8]
wire _res_T_44_cfg_x = 1'h0; // @[PMP.scala:185:8]
wire _res_T_44_cfg_w = 1'h0; // @[PMP.scala:185:8]
wire _res_T_44_cfg_r = 1'h0; // @[PMP.scala:185:8]
wire _res_hit_T_13 = 1'h0; // @[PMP.scala:45:20]
wire _res_hit_T_15 = 1'h0; // @[PMP.scala:46:26]
wire res_hit_msbsLess_2 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_2 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_19 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_20 = 1'h0; // @[PMP.scala:83:16]
wire res_hit_msbsLess_3 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_3 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_22 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_23 = 1'h0; // @[PMP.scala:83:16]
wire _res_hit_T_24 = 1'h0; // @[PMP.scala:94:48]
wire _res_hit_T_25 = 1'h0; // @[PMP.scala:132:61]
wire res_hit_1 = 1'h0; // @[PMP.scala:132:8]
wire res_ignore_1 = 1'h0; // @[PMP.scala:164:26]
wire _res_aligned_straddlesLowerBound_T_33 = 1'h0; // @[PMP.scala:123:147]
wire res_aligned_straddlesLowerBound_1 = 1'h0; // @[PMP.scala:123:90]
wire _res_aligned_straddlesUpperBound_T_33 = 1'h0; // @[PMP.scala:124:148]
wire res_aligned_straddlesUpperBound_1 = 1'h0; // @[PMP.scala:124:85]
wire _res_aligned_rangeAligned_T_1 = 1'h0; // @[PMP.scala:125:46]
wire _res_aligned_T_1 = 1'h0; // @[PMP.scala:45:20]
wire _res_T_46 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_47 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_48 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_49 = 1'h0; // @[PMP.scala:170:30]
wire _res_T_53 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_55 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_57 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_59 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_61 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_63 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_64 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_65 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_66 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_67 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_68 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_69 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_70 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_72 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_73 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_74 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_75 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_76 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_77 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_78 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_79 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_81 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_82 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_83 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_84 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_85 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_86 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_87 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_88 = 1'h0; // @[PMP.scala:178:50]
wire res_cur_1_cfg_l = 1'h0; // @[PMP.scala:181:23]
wire res_cur_1_cfg_x = 1'h0; // @[PMP.scala:181:23]
wire res_cur_1_cfg_w = 1'h0; // @[PMP.scala:181:23]
wire res_cur_1_cfg_r = 1'h0; // @[PMP.scala:181:23]
wire _res_cur_cfg_r_T_2 = 1'h0; // @[PMP.scala:182:40]
wire _res_cur_cfg_r_T_3 = 1'h0; // @[PMP.scala:182:26]
wire _res_cur_cfg_w_T_2 = 1'h0; // @[PMP.scala:183:40]
wire _res_cur_cfg_w_T_3 = 1'h0; // @[PMP.scala:183:26]
wire _res_cur_cfg_x_T_2 = 1'h0; // @[PMP.scala:184:40]
wire _res_cur_cfg_x_T_3 = 1'h0; // @[PMP.scala:184:26]
wire _res_T_89_cfg_l = 1'h0; // @[PMP.scala:185:8]
wire _res_T_89_cfg_x = 1'h0; // @[PMP.scala:185:8]
wire _res_T_89_cfg_w = 1'h0; // @[PMP.scala:185:8]
wire _res_T_89_cfg_r = 1'h0; // @[PMP.scala:185:8]
wire _res_hit_T_26 = 1'h0; // @[PMP.scala:45:20]
wire _res_hit_T_28 = 1'h0; // @[PMP.scala:46:26]
wire res_hit_msbsLess_4 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_4 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_32 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_33 = 1'h0; // @[PMP.scala:83:16]
wire res_hit_msbsLess_5 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_5 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_35 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_36 = 1'h0; // @[PMP.scala:83:16]
wire _res_hit_T_37 = 1'h0; // @[PMP.scala:94:48]
wire _res_hit_T_38 = 1'h0; // @[PMP.scala:132:61]
wire res_hit_2 = 1'h0; // @[PMP.scala:132:8]
wire res_ignore_2 = 1'h0; // @[PMP.scala:164:26]
wire _res_aligned_straddlesLowerBound_T_50 = 1'h0; // @[PMP.scala:123:147]
wire res_aligned_straddlesLowerBound_2 = 1'h0; // @[PMP.scala:123:90]
wire _res_aligned_straddlesUpperBound_T_50 = 1'h0; // @[PMP.scala:124:148]
wire res_aligned_straddlesUpperBound_2 = 1'h0; // @[PMP.scala:124:85]
wire _res_aligned_rangeAligned_T_2 = 1'h0; // @[PMP.scala:125:46]
wire _res_aligned_T_2 = 1'h0; // @[PMP.scala:45:20]
wire _res_T_91 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_92 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_93 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_94 = 1'h0; // @[PMP.scala:170:30]
wire _res_T_98 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_100 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_102 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_104 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_106 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_108 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_109 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_110 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_111 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_112 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_113 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_114 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_115 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_117 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_118 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_119 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_120 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_121 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_122 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_123 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_124 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_126 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_127 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_128 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_129 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_130 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_131 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_132 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_133 = 1'h0; // @[PMP.scala:178:50]
wire res_cur_2_cfg_l = 1'h0; // @[PMP.scala:181:23]
wire res_cur_2_cfg_x = 1'h0; // @[PMP.scala:181:23]
wire res_cur_2_cfg_w = 1'h0; // @[PMP.scala:181:23]
wire res_cur_2_cfg_r = 1'h0; // @[PMP.scala:181:23]
wire _res_cur_cfg_r_T_4 = 1'h0; // @[PMP.scala:182:40]
wire _res_cur_cfg_r_T_5 = 1'h0; // @[PMP.scala:182:26]
wire _res_cur_cfg_w_T_4 = 1'h0; // @[PMP.scala:183:40]
wire _res_cur_cfg_w_T_5 = 1'h0; // @[PMP.scala:183:26]
wire _res_cur_cfg_x_T_4 = 1'h0; // @[PMP.scala:184:40]
wire _res_cur_cfg_x_T_5 = 1'h0; // @[PMP.scala:184:26]
wire _res_T_134_cfg_l = 1'h0; // @[PMP.scala:185:8]
wire _res_T_134_cfg_x = 1'h0; // @[PMP.scala:185:8]
wire _res_T_134_cfg_w = 1'h0; // @[PMP.scala:185:8]
wire _res_T_134_cfg_r = 1'h0; // @[PMP.scala:185:8]
wire _res_hit_T_39 = 1'h0; // @[PMP.scala:45:20]
wire _res_hit_T_41 = 1'h0; // @[PMP.scala:46:26]
wire res_hit_msbsLess_6 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_6 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_45 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_46 = 1'h0; // @[PMP.scala:83:16]
wire res_hit_msbsLess_7 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_7 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_48 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_49 = 1'h0; // @[PMP.scala:83:16]
wire _res_hit_T_50 = 1'h0; // @[PMP.scala:94:48]
wire _res_hit_T_51 = 1'h0; // @[PMP.scala:132:61]
wire res_hit_3 = 1'h0; // @[PMP.scala:132:8]
wire res_ignore_3 = 1'h0; // @[PMP.scala:164:26]
wire _res_aligned_straddlesLowerBound_T_67 = 1'h0; // @[PMP.scala:123:147]
wire res_aligned_straddlesLowerBound_3 = 1'h0; // @[PMP.scala:123:90]
wire _res_aligned_straddlesUpperBound_T_67 = 1'h0; // @[PMP.scala:124:148]
wire res_aligned_straddlesUpperBound_3 = 1'h0; // @[PMP.scala:124:85]
wire _res_aligned_rangeAligned_T_3 = 1'h0; // @[PMP.scala:125:46]
wire _res_aligned_T_3 = 1'h0; // @[PMP.scala:45:20]
wire _res_T_136 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_137 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_138 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_139 = 1'h0; // @[PMP.scala:170:30]
wire _res_T_143 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_145 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_147 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_149 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_151 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_153 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_154 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_155 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_156 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_157 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_158 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_159 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_160 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_162 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_163 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_164 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_165 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_166 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_167 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_168 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_169 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_171 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_172 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_173 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_174 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_175 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_176 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_177 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_178 = 1'h0; // @[PMP.scala:178:50]
wire res_cur_3_cfg_l = 1'h0; // @[PMP.scala:181:23]
wire res_cur_3_cfg_x = 1'h0; // @[PMP.scala:181:23]
wire res_cur_3_cfg_w = 1'h0; // @[PMP.scala:181:23]
wire res_cur_3_cfg_r = 1'h0; // @[PMP.scala:181:23]
wire _res_cur_cfg_r_T_6 = 1'h0; // @[PMP.scala:182:40]
wire _res_cur_cfg_r_T_7 = 1'h0; // @[PMP.scala:182:26]
wire _res_cur_cfg_w_T_6 = 1'h0; // @[PMP.scala:183:40]
wire _res_cur_cfg_w_T_7 = 1'h0; // @[PMP.scala:183:26]
wire _res_cur_cfg_x_T_6 = 1'h0; // @[PMP.scala:184:40]
wire _res_cur_cfg_x_T_7 = 1'h0; // @[PMP.scala:184:26]
wire _res_T_179_cfg_l = 1'h0; // @[PMP.scala:185:8]
wire _res_T_179_cfg_x = 1'h0; // @[PMP.scala:185:8]
wire _res_T_179_cfg_w = 1'h0; // @[PMP.scala:185:8]
wire _res_T_179_cfg_r = 1'h0; // @[PMP.scala:185:8]
wire _res_hit_T_52 = 1'h0; // @[PMP.scala:45:20]
wire _res_hit_T_54 = 1'h0; // @[PMP.scala:46:26]
wire res_hit_msbsLess_8 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_8 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_58 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_59 = 1'h0; // @[PMP.scala:83:16]
wire res_hit_msbsLess_9 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_9 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_61 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_62 = 1'h0; // @[PMP.scala:83:16]
wire _res_hit_T_63 = 1'h0; // @[PMP.scala:94:48]
wire _res_hit_T_64 = 1'h0; // @[PMP.scala:132:61]
wire res_hit_4 = 1'h0; // @[PMP.scala:132:8]
wire res_ignore_4 = 1'h0; // @[PMP.scala:164:26]
wire _res_aligned_straddlesLowerBound_T_84 = 1'h0; // @[PMP.scala:123:147]
wire res_aligned_straddlesLowerBound_4 = 1'h0; // @[PMP.scala:123:90]
wire _res_aligned_straddlesUpperBound_T_84 = 1'h0; // @[PMP.scala:124:148]
wire res_aligned_straddlesUpperBound_4 = 1'h0; // @[PMP.scala:124:85]
wire _res_aligned_rangeAligned_T_4 = 1'h0; // @[PMP.scala:125:46]
wire _res_aligned_T_4 = 1'h0; // @[PMP.scala:45:20]
wire _res_T_181 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_182 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_183 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_184 = 1'h0; // @[PMP.scala:170:30]
wire _res_T_188 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_190 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_192 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_194 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_196 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_198 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_199 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_200 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_201 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_202 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_203 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_204 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_205 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_207 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_208 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_209 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_210 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_211 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_212 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_213 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_214 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_216 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_217 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_218 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_219 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_220 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_221 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_222 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_223 = 1'h0; // @[PMP.scala:178:50]
wire res_cur_4_cfg_l = 1'h0; // @[PMP.scala:181:23]
wire res_cur_4_cfg_x = 1'h0; // @[PMP.scala:181:23]
wire res_cur_4_cfg_w = 1'h0; // @[PMP.scala:181:23]
wire res_cur_4_cfg_r = 1'h0; // @[PMP.scala:181:23]
wire _res_cur_cfg_r_T_8 = 1'h0; // @[PMP.scala:182:40]
wire _res_cur_cfg_r_T_9 = 1'h0; // @[PMP.scala:182:26]
wire _res_cur_cfg_w_T_8 = 1'h0; // @[PMP.scala:183:40]
wire _res_cur_cfg_w_T_9 = 1'h0; // @[PMP.scala:183:26]
wire _res_cur_cfg_x_T_8 = 1'h0; // @[PMP.scala:184:40]
wire _res_cur_cfg_x_T_9 = 1'h0; // @[PMP.scala:184:26]
wire _res_T_224_cfg_l = 1'h0; // @[PMP.scala:185:8]
wire _res_T_224_cfg_x = 1'h0; // @[PMP.scala:185:8]
wire _res_T_224_cfg_w = 1'h0; // @[PMP.scala:185:8]
wire _res_T_224_cfg_r = 1'h0; // @[PMP.scala:185:8]
wire _res_hit_T_65 = 1'h0; // @[PMP.scala:45:20]
wire _res_hit_T_67 = 1'h0; // @[PMP.scala:46:26]
wire res_hit_msbsLess_10 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_10 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_71 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_72 = 1'h0; // @[PMP.scala:83:16]
wire res_hit_msbsLess_11 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_11 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_74 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_75 = 1'h0; // @[PMP.scala:83:16]
wire _res_hit_T_76 = 1'h0; // @[PMP.scala:94:48]
wire _res_hit_T_77 = 1'h0; // @[PMP.scala:132:61]
wire res_hit_5 = 1'h0; // @[PMP.scala:132:8]
wire res_ignore_5 = 1'h0; // @[PMP.scala:164:26]
wire _res_aligned_straddlesLowerBound_T_101 = 1'h0; // @[PMP.scala:123:147]
wire res_aligned_straddlesLowerBound_5 = 1'h0; // @[PMP.scala:123:90]
wire _res_aligned_straddlesUpperBound_T_101 = 1'h0; // @[PMP.scala:124:148]
wire res_aligned_straddlesUpperBound_5 = 1'h0; // @[PMP.scala:124:85]
wire _res_aligned_rangeAligned_T_5 = 1'h0; // @[PMP.scala:125:46]
wire _res_aligned_T_5 = 1'h0; // @[PMP.scala:45:20]
wire _res_T_226 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_227 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_228 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_229 = 1'h0; // @[PMP.scala:170:30]
wire _res_T_233 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_235 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_237 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_239 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_241 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_243 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_244 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_245 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_246 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_247 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_248 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_249 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_250 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_252 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_253 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_254 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_255 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_256 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_257 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_258 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_259 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_261 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_262 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_263 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_264 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_265 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_266 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_267 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_268 = 1'h0; // @[PMP.scala:178:50]
wire res_cur_5_cfg_l = 1'h0; // @[PMP.scala:181:23]
wire res_cur_5_cfg_x = 1'h0; // @[PMP.scala:181:23]
wire res_cur_5_cfg_w = 1'h0; // @[PMP.scala:181:23]
wire res_cur_5_cfg_r = 1'h0; // @[PMP.scala:181:23]
wire _res_cur_cfg_r_T_10 = 1'h0; // @[PMP.scala:182:40]
wire _res_cur_cfg_r_T_11 = 1'h0; // @[PMP.scala:182:26]
wire _res_cur_cfg_w_T_10 = 1'h0; // @[PMP.scala:183:40]
wire _res_cur_cfg_w_T_11 = 1'h0; // @[PMP.scala:183:26]
wire _res_cur_cfg_x_T_10 = 1'h0; // @[PMP.scala:184:40]
wire _res_cur_cfg_x_T_11 = 1'h0; // @[PMP.scala:184:26]
wire _res_T_269_cfg_l = 1'h0; // @[PMP.scala:185:8]
wire _res_T_269_cfg_x = 1'h0; // @[PMP.scala:185:8]
wire _res_T_269_cfg_w = 1'h0; // @[PMP.scala:185:8]
wire _res_T_269_cfg_r = 1'h0; // @[PMP.scala:185:8]
wire _res_hit_T_78 = 1'h0; // @[PMP.scala:45:20]
wire _res_hit_T_80 = 1'h0; // @[PMP.scala:46:26]
wire res_hit_msbsLess_12 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_12 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_84 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_85 = 1'h0; // @[PMP.scala:83:16]
wire res_hit_msbsLess_13 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_13 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_87 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_88 = 1'h0; // @[PMP.scala:83:16]
wire _res_hit_T_89 = 1'h0; // @[PMP.scala:94:48]
wire _res_hit_T_90 = 1'h0; // @[PMP.scala:132:61]
wire res_hit_6 = 1'h0; // @[PMP.scala:132:8]
wire res_ignore_6 = 1'h0; // @[PMP.scala:164:26]
wire _res_aligned_straddlesLowerBound_T_118 = 1'h0; // @[PMP.scala:123:147]
wire res_aligned_straddlesLowerBound_6 = 1'h0; // @[PMP.scala:123:90]
wire _res_aligned_straddlesUpperBound_T_118 = 1'h0; // @[PMP.scala:124:148]
wire res_aligned_straddlesUpperBound_6 = 1'h0; // @[PMP.scala:124:85]
wire _res_aligned_rangeAligned_T_6 = 1'h0; // @[PMP.scala:125:46]
wire _res_aligned_T_6 = 1'h0; // @[PMP.scala:45:20]
wire _res_T_271 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_272 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_273 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_274 = 1'h0; // @[PMP.scala:170:30]
wire _res_T_278 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_280 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_282 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_284 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_286 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_288 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_289 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_290 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_291 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_292 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_293 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_294 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_295 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_297 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_298 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_299 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_300 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_301 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_302 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_303 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_304 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_306 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_307 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_308 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_309 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_310 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_311 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_312 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_313 = 1'h0; // @[PMP.scala:178:50]
wire res_cur_6_cfg_l = 1'h0; // @[PMP.scala:181:23]
wire res_cur_6_cfg_x = 1'h0; // @[PMP.scala:181:23]
wire res_cur_6_cfg_w = 1'h0; // @[PMP.scala:181:23]
wire res_cur_6_cfg_r = 1'h0; // @[PMP.scala:181:23]
wire _res_cur_cfg_r_T_12 = 1'h0; // @[PMP.scala:182:40]
wire _res_cur_cfg_r_T_13 = 1'h0; // @[PMP.scala:182:26]
wire _res_cur_cfg_w_T_12 = 1'h0; // @[PMP.scala:183:40]
wire _res_cur_cfg_w_T_13 = 1'h0; // @[PMP.scala:183:26]
wire _res_cur_cfg_x_T_12 = 1'h0; // @[PMP.scala:184:40]
wire _res_cur_cfg_x_T_13 = 1'h0; // @[PMP.scala:184:26]
wire _res_T_314_cfg_l = 1'h0; // @[PMP.scala:185:8]
wire _res_T_314_cfg_x = 1'h0; // @[PMP.scala:185:8]
wire _res_T_314_cfg_w = 1'h0; // @[PMP.scala:185:8]
wire _res_T_314_cfg_r = 1'h0; // @[PMP.scala:185:8]
wire _res_hit_T_91 = 1'h0; // @[PMP.scala:45:20]
wire _res_hit_T_93 = 1'h0; // @[PMP.scala:46:26]
wire res_hit_msbsLess_14 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_14 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_97 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_98 = 1'h0; // @[PMP.scala:83:16]
wire res_hit_msbsLess_15 = 1'h0; // @[PMP.scala:80:39]
wire res_hit_lsbsLess_15 = 1'h0; // @[PMP.scala:82:53]
wire _res_hit_T_100 = 1'h0; // @[PMP.scala:83:30]
wire _res_hit_T_101 = 1'h0; // @[PMP.scala:83:16]
wire _res_hit_T_102 = 1'h0; // @[PMP.scala:94:48]
wire _res_hit_T_103 = 1'h0; // @[PMP.scala:132:61]
wire res_hit_7 = 1'h0; // @[PMP.scala:132:8]
wire res_ignore_7 = 1'h0; // @[PMP.scala:164:26]
wire _res_aligned_straddlesLowerBound_T_135 = 1'h0; // @[PMP.scala:123:147]
wire res_aligned_straddlesLowerBound_7 = 1'h0; // @[PMP.scala:123:90]
wire _res_aligned_straddlesUpperBound_T_135 = 1'h0; // @[PMP.scala:124:148]
wire res_aligned_straddlesUpperBound_7 = 1'h0; // @[PMP.scala:124:85]
wire _res_aligned_rangeAligned_T_7 = 1'h0; // @[PMP.scala:125:46]
wire _res_aligned_T_7 = 1'h0; // @[PMP.scala:45:20]
wire _res_T_316 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_317 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_318 = 1'h0; // @[PMP.scala:168:32]
wire _res_T_319 = 1'h0; // @[PMP.scala:170:30]
wire _res_T_323 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_325 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_327 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_329 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_331 = 1'h0; // @[PMP.scala:174:60]
wire _res_T_333 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_334 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_335 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_336 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_337 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_338 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_339 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_340 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_342 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_343 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_344 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_345 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_346 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_347 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_348 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_349 = 1'h0; // @[PMP.scala:178:50]
wire _res_T_351 = 1'h0; // @[PMP.scala:177:30]
wire _res_T_352 = 1'h0; // @[PMP.scala:177:37]
wire _res_T_353 = 1'h0; // @[PMP.scala:177:61]
wire _res_T_354 = 1'h0; // @[PMP.scala:177:48]
wire _res_T_355 = 1'h0; // @[PMP.scala:178:32]
wire _res_T_356 = 1'h0; // @[PMP.scala:178:39]
wire _res_T_357 = 1'h0; // @[PMP.scala:178:63]
wire _res_T_358 = 1'h0; // @[PMP.scala:178:50]
wire res_cur_7_cfg_l = 1'h0; // @[PMP.scala:181:23]
wire res_cur_7_cfg_x = 1'h0; // @[PMP.scala:181:23]
wire res_cur_7_cfg_w = 1'h0; // @[PMP.scala:181:23]
wire res_cur_7_cfg_r = 1'h0; // @[PMP.scala:181:23]
wire _res_cur_cfg_r_T_14 = 1'h0; // @[PMP.scala:182:40]
wire _res_cur_cfg_r_T_15 = 1'h0; // @[PMP.scala:182:26]
wire _res_cur_cfg_w_T_14 = 1'h0; // @[PMP.scala:183:40]
wire _res_cur_cfg_w_T_15 = 1'h0; // @[PMP.scala:183:26]
wire _res_cur_cfg_x_T_14 = 1'h0; // @[PMP.scala:184:40]
wire _res_cur_cfg_x_T_15 = 1'h0; // @[PMP.scala:184:26]
wire res_cfg_l = 1'h0; // @[PMP.scala:185:8]
wire res_cfg_x = 1'h0; // @[PMP.scala:185:8]
wire res_cfg_w = 1'h0; // @[PMP.scala:185:8]
wire res_cfg_r = 1'h0; // @[PMP.scala:185:8]
wire [1:0] io_prv = 2'h1; // @[PMP.scala:143:7, :146:14]
wire [5:0] _GEN = 6'h7 << io_size_0; // @[package.scala:243:71]
wire [5:0] _res_hit_lsbMask_T; // @[package.scala:243:71]
assign _res_hit_lsbMask_T = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_T_3; // @[package.scala:243:71]
assign _res_hit_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_aligned_lsbMask_T; // @[package.scala:243:71]
assign _res_aligned_lsbMask_T = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_lsbMask_T_3; // @[package.scala:243:71]
assign _res_hit_lsbMask_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_T_16; // @[package.scala:243:71]
assign _res_hit_T_16 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_aligned_lsbMask_T_2; // @[package.scala:243:71]
assign _res_aligned_lsbMask_T_2 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_lsbMask_T_6; // @[package.scala:243:71]
assign _res_hit_lsbMask_T_6 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_T_29; // @[package.scala:243:71]
assign _res_hit_T_29 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_aligned_lsbMask_T_4; // @[package.scala:243:71]
assign _res_aligned_lsbMask_T_4 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_lsbMask_T_9; // @[package.scala:243:71]
assign _res_hit_lsbMask_T_9 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_T_42; // @[package.scala:243:71]
assign _res_hit_T_42 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_aligned_lsbMask_T_6; // @[package.scala:243:71]
assign _res_aligned_lsbMask_T_6 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_lsbMask_T_12; // @[package.scala:243:71]
assign _res_hit_lsbMask_T_12 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_T_55; // @[package.scala:243:71]
assign _res_hit_T_55 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_aligned_lsbMask_T_8; // @[package.scala:243:71]
assign _res_aligned_lsbMask_T_8 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_lsbMask_T_15; // @[package.scala:243:71]
assign _res_hit_lsbMask_T_15 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_T_68; // @[package.scala:243:71]
assign _res_hit_T_68 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_aligned_lsbMask_T_10; // @[package.scala:243:71]
assign _res_aligned_lsbMask_T_10 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_lsbMask_T_18; // @[package.scala:243:71]
assign _res_hit_lsbMask_T_18 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_T_81; // @[package.scala:243:71]
assign _res_hit_T_81 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_aligned_lsbMask_T_12; // @[package.scala:243:71]
assign _res_aligned_lsbMask_T_12 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_lsbMask_T_21; // @[package.scala:243:71]
assign _res_hit_lsbMask_T_21 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_hit_T_94; // @[package.scala:243:71]
assign _res_hit_T_94 = _GEN; // @[package.scala:243:71]
wire [5:0] _res_aligned_lsbMask_T_14; // @[package.scala:243:71]
assign _res_aligned_lsbMask_T_14 = _GEN; // @[package.scala:243:71]
wire [2:0] _res_hit_lsbMask_T_1 = _res_hit_lsbMask_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_lsbMask_T_2 = ~_res_hit_lsbMask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] res_hit_lsbMask = {29'h0, _res_hit_lsbMask_T_2}; // @[package.scala:243:46]
wire [28:0] _res_hit_msbMatch_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7]
wire [28:0] _res_hit_msbsLess_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_hit_msbsLess_T_6 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_7 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_aligned_straddlesLowerBound_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7]
wire [28:0] _res_aligned_straddlesUpperBound_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7]
wire [28:0] _res_hit_msbMatch_T_10 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7]
wire [28:0] _res_hit_msbsLess_T_12 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_14 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_hit_msbsLess_T_18 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_21 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_aligned_straddlesLowerBound_T_17 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7]
wire [28:0] _res_aligned_straddlesUpperBound_T_17 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7]
wire [28:0] _res_hit_msbMatch_T_20 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7]
wire [28:0] _res_hit_msbsLess_T_24 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_28 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_hit_msbsLess_T_30 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_35 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_aligned_straddlesLowerBound_T_34 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7]
wire [28:0] _res_aligned_straddlesUpperBound_T_34 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7]
wire [28:0] _res_hit_msbMatch_T_30 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7]
wire [28:0] _res_hit_msbsLess_T_36 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_42 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_hit_msbsLess_T_42 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_49 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_aligned_straddlesLowerBound_T_51 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7]
wire [28:0] _res_aligned_straddlesUpperBound_T_51 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7]
wire [28:0] _res_hit_msbMatch_T_40 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7]
wire [28:0] _res_hit_msbsLess_T_48 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_56 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_hit_msbsLess_T_54 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_63 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_aligned_straddlesLowerBound_T_68 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7]
wire [28:0] _res_aligned_straddlesUpperBound_T_68 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7]
wire [28:0] _res_hit_msbMatch_T_50 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7]
wire [28:0] _res_hit_msbsLess_T_60 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_70 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_hit_msbsLess_T_66 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_77 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_aligned_straddlesLowerBound_T_85 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7]
wire [28:0] _res_aligned_straddlesUpperBound_T_85 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7]
wire [28:0] _res_hit_msbMatch_T_60 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7]
wire [28:0] _res_hit_msbsLess_T_72 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_84 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_hit_msbsLess_T_78 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_91 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_aligned_straddlesLowerBound_T_102 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7]
wire [28:0] _res_aligned_straddlesUpperBound_T_102 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7]
wire [28:0] _res_hit_msbMatch_T_70 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7]
wire [28:0] _res_hit_msbsLess_T_84 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_98 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_hit_msbsLess_T_90 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7]
wire [28:0] _res_hit_msbsEqual_T_105 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7]
wire [28:0] _res_aligned_straddlesLowerBound_T_119 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7]
wire [28:0] _res_aligned_straddlesUpperBound_T_119 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7]
wire [28:0] _res_hit_msbMatch_T_7 = _res_hit_msbMatch_T; // @[PMP.scala:63:47, :69:29]
wire [28:0] _res_hit_msbMatch_T_9 = _res_hit_msbMatch_T_7; // @[PMP.scala:63:{47,52}]
wire res_hit_msbMatch = _res_hit_msbMatch_T_9 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [2:0] _res_hit_lsbMatch_T = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7]
wire [2:0] _res_hit_lsbsLess_T = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_hit_lsbsLess_T_7 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_aligned_straddlesLowerBound_T_13 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7]
wire [2:0] _res_aligned_straddlesUpperBound_T_13 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7]
wire [2:0] _res_hit_lsbMatch_T_10 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7]
wire [2:0] _res_hit_lsbsLess_T_14 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_hit_lsbsLess_T_21 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_aligned_straddlesLowerBound_T_30 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7]
wire [2:0] _res_aligned_straddlesUpperBound_T_30 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7]
wire [2:0] _res_hit_lsbMatch_T_20 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7]
wire [2:0] _res_hit_lsbsLess_T_28 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_hit_lsbsLess_T_35 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_aligned_straddlesLowerBound_T_47 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7]
wire [2:0] _res_aligned_straddlesUpperBound_T_47 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7]
wire [2:0] _res_hit_lsbMatch_T_30 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7]
wire [2:0] _res_hit_lsbsLess_T_42 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_hit_lsbsLess_T_49 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_aligned_straddlesLowerBound_T_64 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7]
wire [2:0] _res_aligned_straddlesUpperBound_T_64 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7]
wire [2:0] _res_hit_lsbMatch_T_40 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7]
wire [2:0] _res_hit_lsbsLess_T_56 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_hit_lsbsLess_T_63 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_aligned_straddlesLowerBound_T_81 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7]
wire [2:0] _res_aligned_straddlesUpperBound_T_81 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7]
wire [2:0] _res_hit_lsbMatch_T_50 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7]
wire [2:0] _res_hit_lsbsLess_T_70 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_hit_lsbsLess_T_77 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_aligned_straddlesLowerBound_T_98 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7]
wire [2:0] _res_aligned_straddlesUpperBound_T_98 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7]
wire [2:0] _res_hit_lsbMatch_T_60 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7]
wire [2:0] _res_hit_lsbsLess_T_84 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_hit_lsbsLess_T_91 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_aligned_straddlesLowerBound_T_115 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7]
wire [2:0] _res_aligned_straddlesUpperBound_T_115 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7]
wire [2:0] _res_hit_lsbMatch_T_70 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7]
wire [2:0] _res_hit_lsbsLess_T_98 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_hit_lsbsLess_T_105 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7]
wire [2:0] _res_aligned_straddlesLowerBound_T_132 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7]
wire [2:0] _res_aligned_straddlesUpperBound_T_132 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7]
wire [2:0] _res_hit_lsbMatch_T_7 = _res_hit_lsbMatch_T; // @[PMP.scala:63:47, :70:28]
wire [2:0] _res_hit_lsbMatch_T_6 = res_hit_lsbMask[2:0]; // @[PMP.scala:68:26, :70:80]
wire [2:0] _res_hit_lsbMatch_T_8 = ~_res_hit_lsbMatch_T_6; // @[PMP.scala:63:54, :70:80]
wire [2:0] _res_hit_lsbMatch_T_9 = _res_hit_lsbMatch_T_7 & _res_hit_lsbMatch_T_8; // @[PMP.scala:63:{47,52,54}]
wire res_hit_lsbMatch = _res_hit_lsbMatch_T_9 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire _res_hit_T_1 = res_hit_msbMatch & res_hit_lsbMatch; // @[PMP.scala:63:58, :71:16]
wire [2:0] _res_hit_T_4 = _res_hit_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_T_5 = ~_res_hit_T_4; // @[package.scala:243:{46,76}]
wire [28:0] _res_hit_msbsEqual_T_6 = _res_hit_msbsEqual_T; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual = _res_hit_msbsEqual_T_6 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_1 = _res_hit_lsbsLess_T | _res_hit_T_5; // @[package.scala:243:46]
wire [28:0] _res_hit_msbsEqual_T_13 = _res_hit_msbsEqual_T_7; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_1 = _res_hit_msbsEqual_T_13 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_8 = _res_hit_lsbsLess_T_7; // @[PMP.scala:82:{25,42}]
wire [2:0] _res_aligned_lsbMask_T_1 = _res_aligned_lsbMask_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] res_aligned_lsbMask = ~_res_aligned_lsbMask_T_1; // @[package.scala:243:{46,76}]
wire [2:0] _res_aligned_pow2Aligned_T_2 = res_aligned_lsbMask; // @[package.scala:243:46]
wire [28:0] _res_aligned_straddlesLowerBound_T_6 = _res_aligned_straddlesLowerBound_T; // @[PMP.scala:123:{35,49}]
wire _res_aligned_straddlesLowerBound_T_7 = _res_aligned_straddlesLowerBound_T_6 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62]
wire [2:0] _res_aligned_straddlesLowerBound_T_14 = ~_res_aligned_straddlesLowerBound_T_13; // @[PMP.scala:123:{127,129}]
wire [28:0] _res_aligned_straddlesUpperBound_T_6 = _res_aligned_straddlesUpperBound_T; // @[PMP.scala:124:{35,49}]
wire _res_aligned_straddlesUpperBound_T_7 = _res_aligned_straddlesUpperBound_T_6 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}]
wire [2:0] _res_aligned_straddlesUpperBound_T_14 = _res_aligned_straddlesUpperBound_T_13 | res_aligned_lsbMask; // @[package.scala:243:46]
wire res_aligned_pow2Aligned = _res_aligned_pow2Aligned_T_2 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26]
wire [2:0] _res_hit_lsbMask_T_4 = _res_hit_lsbMask_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_lsbMask_T_5 = ~_res_hit_lsbMask_T_4; // @[package.scala:243:{46,76}]
wire [31:0] res_hit_lsbMask_1 = {29'h0, _res_hit_lsbMask_T_5}; // @[package.scala:243:46]
wire [28:0] _res_hit_msbMatch_T_17 = _res_hit_msbMatch_T_10; // @[PMP.scala:63:47, :69:29]
wire [28:0] _res_hit_msbMatch_T_19 = _res_hit_msbMatch_T_17; // @[PMP.scala:63:{47,52}]
wire res_hit_msbMatch_1 = _res_hit_msbMatch_T_19 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [2:0] _res_hit_lsbMatch_T_17 = _res_hit_lsbMatch_T_10; // @[PMP.scala:63:47, :70:28]
wire [2:0] _res_hit_lsbMatch_T_16 = res_hit_lsbMask_1[2:0]; // @[PMP.scala:68:26, :70:80]
wire [2:0] _res_hit_lsbMatch_T_18 = ~_res_hit_lsbMatch_T_16; // @[PMP.scala:63:54, :70:80]
wire [2:0] _res_hit_lsbMatch_T_19 = _res_hit_lsbMatch_T_17 & _res_hit_lsbMatch_T_18; // @[PMP.scala:63:{47,52,54}]
wire res_hit_lsbMatch_1 = _res_hit_lsbMatch_T_19 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire _res_hit_T_14 = res_hit_msbMatch_1 & res_hit_lsbMatch_1; // @[PMP.scala:63:58, :71:16]
wire [2:0] _res_hit_T_17 = _res_hit_T_16[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_T_18 = ~_res_hit_T_17; // @[package.scala:243:{46,76}]
wire [28:0] _res_hit_msbsEqual_T_20 = _res_hit_msbsEqual_T_14; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_2 = _res_hit_msbsEqual_T_20 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_15 = _res_hit_lsbsLess_T_14 | _res_hit_T_18; // @[package.scala:243:46]
wire [28:0] _res_hit_msbsEqual_T_27 = _res_hit_msbsEqual_T_21; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_3 = _res_hit_msbsEqual_T_27 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_22 = _res_hit_lsbsLess_T_21; // @[PMP.scala:82:{25,42}]
wire [2:0] _res_aligned_lsbMask_T_3 = _res_aligned_lsbMask_T_2[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] res_aligned_lsbMask_1 = ~_res_aligned_lsbMask_T_3; // @[package.scala:243:{46,76}]
wire [2:0] _res_aligned_pow2Aligned_T_5 = res_aligned_lsbMask_1; // @[package.scala:243:46]
wire [28:0] _res_aligned_straddlesLowerBound_T_23 = _res_aligned_straddlesLowerBound_T_17; // @[PMP.scala:123:{35,49}]
wire _res_aligned_straddlesLowerBound_T_24 = _res_aligned_straddlesLowerBound_T_23 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62]
wire [2:0] _res_aligned_straddlesLowerBound_T_31 = ~_res_aligned_straddlesLowerBound_T_30; // @[PMP.scala:123:{127,129}]
wire [28:0] _res_aligned_straddlesUpperBound_T_23 = _res_aligned_straddlesUpperBound_T_17; // @[PMP.scala:124:{35,49}]
wire _res_aligned_straddlesUpperBound_T_24 = _res_aligned_straddlesUpperBound_T_23 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}]
wire [2:0] _res_aligned_straddlesUpperBound_T_31 = _res_aligned_straddlesUpperBound_T_30 | res_aligned_lsbMask_1; // @[package.scala:243:46]
wire res_aligned_pow2Aligned_1 = _res_aligned_pow2Aligned_T_5 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26]
wire [2:0] _res_hit_lsbMask_T_7 = _res_hit_lsbMask_T_6[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_lsbMask_T_8 = ~_res_hit_lsbMask_T_7; // @[package.scala:243:{46,76}]
wire [31:0] res_hit_lsbMask_2 = {29'h0, _res_hit_lsbMask_T_8}; // @[package.scala:243:46]
wire [28:0] _res_hit_msbMatch_T_27 = _res_hit_msbMatch_T_20; // @[PMP.scala:63:47, :69:29]
wire [28:0] _res_hit_msbMatch_T_29 = _res_hit_msbMatch_T_27; // @[PMP.scala:63:{47,52}]
wire res_hit_msbMatch_2 = _res_hit_msbMatch_T_29 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [2:0] _res_hit_lsbMatch_T_27 = _res_hit_lsbMatch_T_20; // @[PMP.scala:63:47, :70:28]
wire [2:0] _res_hit_lsbMatch_T_26 = res_hit_lsbMask_2[2:0]; // @[PMP.scala:68:26, :70:80]
wire [2:0] _res_hit_lsbMatch_T_28 = ~_res_hit_lsbMatch_T_26; // @[PMP.scala:63:54, :70:80]
wire [2:0] _res_hit_lsbMatch_T_29 = _res_hit_lsbMatch_T_27 & _res_hit_lsbMatch_T_28; // @[PMP.scala:63:{47,52,54}]
wire res_hit_lsbMatch_2 = _res_hit_lsbMatch_T_29 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire _res_hit_T_27 = res_hit_msbMatch_2 & res_hit_lsbMatch_2; // @[PMP.scala:63:58, :71:16]
wire [2:0] _res_hit_T_30 = _res_hit_T_29[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_T_31 = ~_res_hit_T_30; // @[package.scala:243:{46,76}]
wire [28:0] _res_hit_msbsEqual_T_34 = _res_hit_msbsEqual_T_28; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_4 = _res_hit_msbsEqual_T_34 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_29 = _res_hit_lsbsLess_T_28 | _res_hit_T_31; // @[package.scala:243:46]
wire [28:0] _res_hit_msbsEqual_T_41 = _res_hit_msbsEqual_T_35; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_5 = _res_hit_msbsEqual_T_41 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_36 = _res_hit_lsbsLess_T_35; // @[PMP.scala:82:{25,42}]
wire [2:0] _res_aligned_lsbMask_T_5 = _res_aligned_lsbMask_T_4[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] res_aligned_lsbMask_2 = ~_res_aligned_lsbMask_T_5; // @[package.scala:243:{46,76}]
wire [2:0] _res_aligned_pow2Aligned_T_8 = res_aligned_lsbMask_2; // @[package.scala:243:46]
wire [28:0] _res_aligned_straddlesLowerBound_T_40 = _res_aligned_straddlesLowerBound_T_34; // @[PMP.scala:123:{35,49}]
wire _res_aligned_straddlesLowerBound_T_41 = _res_aligned_straddlesLowerBound_T_40 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62]
wire [2:0] _res_aligned_straddlesLowerBound_T_48 = ~_res_aligned_straddlesLowerBound_T_47; // @[PMP.scala:123:{127,129}]
wire [28:0] _res_aligned_straddlesUpperBound_T_40 = _res_aligned_straddlesUpperBound_T_34; // @[PMP.scala:124:{35,49}]
wire _res_aligned_straddlesUpperBound_T_41 = _res_aligned_straddlesUpperBound_T_40 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}]
wire [2:0] _res_aligned_straddlesUpperBound_T_48 = _res_aligned_straddlesUpperBound_T_47 | res_aligned_lsbMask_2; // @[package.scala:243:46]
wire res_aligned_pow2Aligned_2 = _res_aligned_pow2Aligned_T_8 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26]
wire [2:0] _res_hit_lsbMask_T_10 = _res_hit_lsbMask_T_9[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_lsbMask_T_11 = ~_res_hit_lsbMask_T_10; // @[package.scala:243:{46,76}]
wire [31:0] res_hit_lsbMask_3 = {29'h0, _res_hit_lsbMask_T_11}; // @[package.scala:243:46]
wire [28:0] _res_hit_msbMatch_T_37 = _res_hit_msbMatch_T_30; // @[PMP.scala:63:47, :69:29]
wire [28:0] _res_hit_msbMatch_T_39 = _res_hit_msbMatch_T_37; // @[PMP.scala:63:{47,52}]
wire res_hit_msbMatch_3 = _res_hit_msbMatch_T_39 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [2:0] _res_hit_lsbMatch_T_37 = _res_hit_lsbMatch_T_30; // @[PMP.scala:63:47, :70:28]
wire [2:0] _res_hit_lsbMatch_T_36 = res_hit_lsbMask_3[2:0]; // @[PMP.scala:68:26, :70:80]
wire [2:0] _res_hit_lsbMatch_T_38 = ~_res_hit_lsbMatch_T_36; // @[PMP.scala:63:54, :70:80]
wire [2:0] _res_hit_lsbMatch_T_39 = _res_hit_lsbMatch_T_37 & _res_hit_lsbMatch_T_38; // @[PMP.scala:63:{47,52,54}]
wire res_hit_lsbMatch_3 = _res_hit_lsbMatch_T_39 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire _res_hit_T_40 = res_hit_msbMatch_3 & res_hit_lsbMatch_3; // @[PMP.scala:63:58, :71:16]
wire [2:0] _res_hit_T_43 = _res_hit_T_42[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_T_44 = ~_res_hit_T_43; // @[package.scala:243:{46,76}]
wire [28:0] _res_hit_msbsEqual_T_48 = _res_hit_msbsEqual_T_42; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_6 = _res_hit_msbsEqual_T_48 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_43 = _res_hit_lsbsLess_T_42 | _res_hit_T_44; // @[package.scala:243:46]
wire [28:0] _res_hit_msbsEqual_T_55 = _res_hit_msbsEqual_T_49; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_7 = _res_hit_msbsEqual_T_55 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_50 = _res_hit_lsbsLess_T_49; // @[PMP.scala:82:{25,42}]
wire [2:0] _res_aligned_lsbMask_T_7 = _res_aligned_lsbMask_T_6[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] res_aligned_lsbMask_3 = ~_res_aligned_lsbMask_T_7; // @[package.scala:243:{46,76}]
wire [2:0] _res_aligned_pow2Aligned_T_11 = res_aligned_lsbMask_3; // @[package.scala:243:46]
wire [28:0] _res_aligned_straddlesLowerBound_T_57 = _res_aligned_straddlesLowerBound_T_51; // @[PMP.scala:123:{35,49}]
wire _res_aligned_straddlesLowerBound_T_58 = _res_aligned_straddlesLowerBound_T_57 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62]
wire [2:0] _res_aligned_straddlesLowerBound_T_65 = ~_res_aligned_straddlesLowerBound_T_64; // @[PMP.scala:123:{127,129}]
wire [28:0] _res_aligned_straddlesUpperBound_T_57 = _res_aligned_straddlesUpperBound_T_51; // @[PMP.scala:124:{35,49}]
wire _res_aligned_straddlesUpperBound_T_58 = _res_aligned_straddlesUpperBound_T_57 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}]
wire [2:0] _res_aligned_straddlesUpperBound_T_65 = _res_aligned_straddlesUpperBound_T_64 | res_aligned_lsbMask_3; // @[package.scala:243:46]
wire res_aligned_pow2Aligned_3 = _res_aligned_pow2Aligned_T_11 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26]
wire [2:0] _res_hit_lsbMask_T_13 = _res_hit_lsbMask_T_12[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_lsbMask_T_14 = ~_res_hit_lsbMask_T_13; // @[package.scala:243:{46,76}]
wire [31:0] res_hit_lsbMask_4 = {29'h0, _res_hit_lsbMask_T_14}; // @[package.scala:243:46]
wire [28:0] _res_hit_msbMatch_T_47 = _res_hit_msbMatch_T_40; // @[PMP.scala:63:47, :69:29]
wire [28:0] _res_hit_msbMatch_T_49 = _res_hit_msbMatch_T_47; // @[PMP.scala:63:{47,52}]
wire res_hit_msbMatch_4 = _res_hit_msbMatch_T_49 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [2:0] _res_hit_lsbMatch_T_47 = _res_hit_lsbMatch_T_40; // @[PMP.scala:63:47, :70:28]
wire [2:0] _res_hit_lsbMatch_T_46 = res_hit_lsbMask_4[2:0]; // @[PMP.scala:68:26, :70:80]
wire [2:0] _res_hit_lsbMatch_T_48 = ~_res_hit_lsbMatch_T_46; // @[PMP.scala:63:54, :70:80]
wire [2:0] _res_hit_lsbMatch_T_49 = _res_hit_lsbMatch_T_47 & _res_hit_lsbMatch_T_48; // @[PMP.scala:63:{47,52,54}]
wire res_hit_lsbMatch_4 = _res_hit_lsbMatch_T_49 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire _res_hit_T_53 = res_hit_msbMatch_4 & res_hit_lsbMatch_4; // @[PMP.scala:63:58, :71:16]
wire [2:0] _res_hit_T_56 = _res_hit_T_55[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_T_57 = ~_res_hit_T_56; // @[package.scala:243:{46,76}]
wire [28:0] _res_hit_msbsEqual_T_62 = _res_hit_msbsEqual_T_56; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_8 = _res_hit_msbsEqual_T_62 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_57 = _res_hit_lsbsLess_T_56 | _res_hit_T_57; // @[package.scala:243:46]
wire [28:0] _res_hit_msbsEqual_T_69 = _res_hit_msbsEqual_T_63; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_9 = _res_hit_msbsEqual_T_69 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_64 = _res_hit_lsbsLess_T_63; // @[PMP.scala:82:{25,42}]
wire [2:0] _res_aligned_lsbMask_T_9 = _res_aligned_lsbMask_T_8[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] res_aligned_lsbMask_4 = ~_res_aligned_lsbMask_T_9; // @[package.scala:243:{46,76}]
wire [2:0] _res_aligned_pow2Aligned_T_14 = res_aligned_lsbMask_4; // @[package.scala:243:46]
wire [28:0] _res_aligned_straddlesLowerBound_T_74 = _res_aligned_straddlesLowerBound_T_68; // @[PMP.scala:123:{35,49}]
wire _res_aligned_straddlesLowerBound_T_75 = _res_aligned_straddlesLowerBound_T_74 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62]
wire [2:0] _res_aligned_straddlesLowerBound_T_82 = ~_res_aligned_straddlesLowerBound_T_81; // @[PMP.scala:123:{127,129}]
wire [28:0] _res_aligned_straddlesUpperBound_T_74 = _res_aligned_straddlesUpperBound_T_68; // @[PMP.scala:124:{35,49}]
wire _res_aligned_straddlesUpperBound_T_75 = _res_aligned_straddlesUpperBound_T_74 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}]
wire [2:0] _res_aligned_straddlesUpperBound_T_82 = _res_aligned_straddlesUpperBound_T_81 | res_aligned_lsbMask_4; // @[package.scala:243:46]
wire res_aligned_pow2Aligned_4 = _res_aligned_pow2Aligned_T_14 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26]
wire [2:0] _res_hit_lsbMask_T_16 = _res_hit_lsbMask_T_15[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_lsbMask_T_17 = ~_res_hit_lsbMask_T_16; // @[package.scala:243:{46,76}]
wire [31:0] res_hit_lsbMask_5 = {29'h0, _res_hit_lsbMask_T_17}; // @[package.scala:243:46]
wire [28:0] _res_hit_msbMatch_T_57 = _res_hit_msbMatch_T_50; // @[PMP.scala:63:47, :69:29]
wire [28:0] _res_hit_msbMatch_T_59 = _res_hit_msbMatch_T_57; // @[PMP.scala:63:{47,52}]
wire res_hit_msbMatch_5 = _res_hit_msbMatch_T_59 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [2:0] _res_hit_lsbMatch_T_57 = _res_hit_lsbMatch_T_50; // @[PMP.scala:63:47, :70:28]
wire [2:0] _res_hit_lsbMatch_T_56 = res_hit_lsbMask_5[2:0]; // @[PMP.scala:68:26, :70:80]
wire [2:0] _res_hit_lsbMatch_T_58 = ~_res_hit_lsbMatch_T_56; // @[PMP.scala:63:54, :70:80]
wire [2:0] _res_hit_lsbMatch_T_59 = _res_hit_lsbMatch_T_57 & _res_hit_lsbMatch_T_58; // @[PMP.scala:63:{47,52,54}]
wire res_hit_lsbMatch_5 = _res_hit_lsbMatch_T_59 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire _res_hit_T_66 = res_hit_msbMatch_5 & res_hit_lsbMatch_5; // @[PMP.scala:63:58, :71:16]
wire [2:0] _res_hit_T_69 = _res_hit_T_68[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_T_70 = ~_res_hit_T_69; // @[package.scala:243:{46,76}]
wire [28:0] _res_hit_msbsEqual_T_76 = _res_hit_msbsEqual_T_70; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_10 = _res_hit_msbsEqual_T_76 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_71 = _res_hit_lsbsLess_T_70 | _res_hit_T_70; // @[package.scala:243:46]
wire [28:0] _res_hit_msbsEqual_T_83 = _res_hit_msbsEqual_T_77; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_11 = _res_hit_msbsEqual_T_83 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_78 = _res_hit_lsbsLess_T_77; // @[PMP.scala:82:{25,42}]
wire [2:0] _res_aligned_lsbMask_T_11 = _res_aligned_lsbMask_T_10[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] res_aligned_lsbMask_5 = ~_res_aligned_lsbMask_T_11; // @[package.scala:243:{46,76}]
wire [2:0] _res_aligned_pow2Aligned_T_17 = res_aligned_lsbMask_5; // @[package.scala:243:46]
wire [28:0] _res_aligned_straddlesLowerBound_T_91 = _res_aligned_straddlesLowerBound_T_85; // @[PMP.scala:123:{35,49}]
wire _res_aligned_straddlesLowerBound_T_92 = _res_aligned_straddlesLowerBound_T_91 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62]
wire [2:0] _res_aligned_straddlesLowerBound_T_99 = ~_res_aligned_straddlesLowerBound_T_98; // @[PMP.scala:123:{127,129}]
wire [28:0] _res_aligned_straddlesUpperBound_T_91 = _res_aligned_straddlesUpperBound_T_85; // @[PMP.scala:124:{35,49}]
wire _res_aligned_straddlesUpperBound_T_92 = _res_aligned_straddlesUpperBound_T_91 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}]
wire [2:0] _res_aligned_straddlesUpperBound_T_99 = _res_aligned_straddlesUpperBound_T_98 | res_aligned_lsbMask_5; // @[package.scala:243:46]
wire res_aligned_pow2Aligned_5 = _res_aligned_pow2Aligned_T_17 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26]
wire [2:0] _res_hit_lsbMask_T_19 = _res_hit_lsbMask_T_18[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_lsbMask_T_20 = ~_res_hit_lsbMask_T_19; // @[package.scala:243:{46,76}]
wire [31:0] res_hit_lsbMask_6 = {29'h0, _res_hit_lsbMask_T_20}; // @[package.scala:243:46]
wire [28:0] _res_hit_msbMatch_T_67 = _res_hit_msbMatch_T_60; // @[PMP.scala:63:47, :69:29]
wire [28:0] _res_hit_msbMatch_T_69 = _res_hit_msbMatch_T_67; // @[PMP.scala:63:{47,52}]
wire res_hit_msbMatch_6 = _res_hit_msbMatch_T_69 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [2:0] _res_hit_lsbMatch_T_67 = _res_hit_lsbMatch_T_60; // @[PMP.scala:63:47, :70:28]
wire [2:0] _res_hit_lsbMatch_T_66 = res_hit_lsbMask_6[2:0]; // @[PMP.scala:68:26, :70:80]
wire [2:0] _res_hit_lsbMatch_T_68 = ~_res_hit_lsbMatch_T_66; // @[PMP.scala:63:54, :70:80]
wire [2:0] _res_hit_lsbMatch_T_69 = _res_hit_lsbMatch_T_67 & _res_hit_lsbMatch_T_68; // @[PMP.scala:63:{47,52,54}]
wire res_hit_lsbMatch_6 = _res_hit_lsbMatch_T_69 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire _res_hit_T_79 = res_hit_msbMatch_6 & res_hit_lsbMatch_6; // @[PMP.scala:63:58, :71:16]
wire [2:0] _res_hit_T_82 = _res_hit_T_81[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_T_83 = ~_res_hit_T_82; // @[package.scala:243:{46,76}]
wire [28:0] _res_hit_msbsEqual_T_90 = _res_hit_msbsEqual_T_84; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_12 = _res_hit_msbsEqual_T_90 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_85 = _res_hit_lsbsLess_T_84 | _res_hit_T_83; // @[package.scala:243:46]
wire [28:0] _res_hit_msbsEqual_T_97 = _res_hit_msbsEqual_T_91; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_13 = _res_hit_msbsEqual_T_97 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_92 = _res_hit_lsbsLess_T_91; // @[PMP.scala:82:{25,42}]
wire [2:0] _res_aligned_lsbMask_T_13 = _res_aligned_lsbMask_T_12[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] res_aligned_lsbMask_6 = ~_res_aligned_lsbMask_T_13; // @[package.scala:243:{46,76}]
wire [2:0] _res_aligned_pow2Aligned_T_20 = res_aligned_lsbMask_6; // @[package.scala:243:46]
wire [28:0] _res_aligned_straddlesLowerBound_T_108 = _res_aligned_straddlesLowerBound_T_102; // @[PMP.scala:123:{35,49}]
wire _res_aligned_straddlesLowerBound_T_109 = _res_aligned_straddlesLowerBound_T_108 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62]
wire [2:0] _res_aligned_straddlesLowerBound_T_116 = ~_res_aligned_straddlesLowerBound_T_115; // @[PMP.scala:123:{127,129}]
wire [28:0] _res_aligned_straddlesUpperBound_T_108 = _res_aligned_straddlesUpperBound_T_102; // @[PMP.scala:124:{35,49}]
wire _res_aligned_straddlesUpperBound_T_109 = _res_aligned_straddlesUpperBound_T_108 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}]
wire [2:0] _res_aligned_straddlesUpperBound_T_116 = _res_aligned_straddlesUpperBound_T_115 | res_aligned_lsbMask_6; // @[package.scala:243:46]
wire res_aligned_pow2Aligned_6 = _res_aligned_pow2Aligned_T_20 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26]
wire [2:0] _res_hit_lsbMask_T_22 = _res_hit_lsbMask_T_21[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_lsbMask_T_23 = ~_res_hit_lsbMask_T_22; // @[package.scala:243:{46,76}]
wire [31:0] res_hit_lsbMask_7 = {29'h0, _res_hit_lsbMask_T_23}; // @[package.scala:243:46]
wire [28:0] _res_hit_msbMatch_T_77 = _res_hit_msbMatch_T_70; // @[PMP.scala:63:47, :69:29]
wire [28:0] _res_hit_msbMatch_T_79 = _res_hit_msbMatch_T_77; // @[PMP.scala:63:{47,52}]
wire res_hit_msbMatch_7 = _res_hit_msbMatch_T_79 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62]
wire [2:0] _res_hit_lsbMatch_T_77 = _res_hit_lsbMatch_T_70; // @[PMP.scala:63:47, :70:28]
wire [2:0] _res_hit_lsbMatch_T_76 = res_hit_lsbMask_7[2:0]; // @[PMP.scala:68:26, :70:80]
wire [2:0] _res_hit_lsbMatch_T_78 = ~_res_hit_lsbMatch_T_76; // @[PMP.scala:63:54, :70:80]
wire [2:0] _res_hit_lsbMatch_T_79 = _res_hit_lsbMatch_T_77 & _res_hit_lsbMatch_T_78; // @[PMP.scala:63:{47,52,54}]
wire res_hit_lsbMatch_7 = _res_hit_lsbMatch_T_79 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26]
wire _res_hit_T_92 = res_hit_msbMatch_7 & res_hit_lsbMatch_7; // @[PMP.scala:63:58, :71:16]
wire [2:0] _res_hit_T_95 = _res_hit_T_94[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _res_hit_T_96 = ~_res_hit_T_95; // @[package.scala:243:{46,76}]
wire [28:0] _res_hit_msbsEqual_T_104 = _res_hit_msbsEqual_T_98; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_14 = _res_hit_msbsEqual_T_104 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_99 = _res_hit_lsbsLess_T_98 | _res_hit_T_96; // @[package.scala:243:46]
wire [28:0] _res_hit_msbsEqual_T_111 = _res_hit_msbsEqual_T_105; // @[PMP.scala:81:{27,41}]
wire res_hit_msbsEqual_15 = _res_hit_msbsEqual_T_111 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62]
wire [2:0] _res_hit_lsbsLess_T_106 = _res_hit_lsbsLess_T_105; // @[PMP.scala:82:{25,42}]
wire [2:0] _res_aligned_lsbMask_T_15 = _res_aligned_lsbMask_T_14[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] res_aligned_lsbMask_7 = ~_res_aligned_lsbMask_T_15; // @[package.scala:243:{46,76}]
wire [2:0] _res_aligned_pow2Aligned_T_23 = res_aligned_lsbMask_7; // @[package.scala:243:46]
wire [28:0] _res_aligned_straddlesLowerBound_T_125 = _res_aligned_straddlesLowerBound_T_119; // @[PMP.scala:123:{35,49}]
wire _res_aligned_straddlesLowerBound_T_126 = _res_aligned_straddlesLowerBound_T_125 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62]
wire [2:0] _res_aligned_straddlesLowerBound_T_133 = ~_res_aligned_straddlesLowerBound_T_132; // @[PMP.scala:123:{127,129}]
wire [28:0] _res_aligned_straddlesUpperBound_T_125 = _res_aligned_straddlesUpperBound_T_119; // @[PMP.scala:124:{35,49}]
wire _res_aligned_straddlesUpperBound_T_126 = _res_aligned_straddlesUpperBound_T_125 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}]
wire [2:0] _res_aligned_straddlesUpperBound_T_133 = _res_aligned_straddlesUpperBound_T_132 | res_aligned_lsbMask_7; // @[package.scala:243:46]
wire res_aligned_pow2Aligned_7 = _res_aligned_pow2Aligned_T_23 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
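// Illustrative readings of the acquire-path state function above:
// - a BRANCH hit serving an NtoB Acquire stays BRANCH
// - a TIP hit with no recorded clients serving an Acquire becomes TRUNK (requester promoted to T)
// - a miss filled with only a B grant (gotT false) lands in BRANCH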
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request it may have dropped to N.
// Therefore, we must consult our own meta-data state to confirm the client still owns the line.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
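// e.g. if a requester claims BtoT but meta.clients no longer records it as a sharer,
// honour_BtoT is false and the Grant parameter below falls back to NtoT.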
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
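// e.g. in cacheState above, a hit entry in TIP state with clients recorded and dirty data
// maps to S_TIP_CD, while any miss (hit = false) maps to S_INVALID regardless of stored state.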
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
// For outerBuffer:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
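// e.g. bitOffsets(0xB) == List(0, 1, 3): the positions of the set bits, LSB first (illustrative).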
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
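// clientBit above yields a one-hot-style bitmap: bit i is set when the source id falls within
// the sourceId range of the i-th probe-capable client; clientSource below is its inverse,
// returning that client's first source id.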
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
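// With offsetBits = 6 and setBits = 10 (the configuration reflected in the MSHR_2 module
// below), parseAddress yields a 6-bit offset, a 10-bit set index, and a 13-bit tag.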
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
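// e.g. needT: Get never needs TRUNK, PutFullData always does, and AcquireBlock needs it
// exactly when its grow parameter is not NtoB.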
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so a probe IS needed; but when hintsSkipProbe is enabled, the requesting client itself is not probed
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit the memory bandwidth-delay product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
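// e.g. with memCycles = 40 and an 8-beat block, out_mshrs = max(2, ceil(40/8)) = 5,
// and all_mshrs (below) = 2 + 5 = 7 (illustrative numbers).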
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
module MSHR_2( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_0, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [6:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [9:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [2:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [6:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [6:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
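// The port widths above reflect this instance's parameterization: 13-bit tags, 10-bit set
// indices (1024 sets), 6-bit offsets (64-byte blocks), 3-bit way indices, and a 1-bit
// clients field, i.e. a single probe-capable inner client.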
wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [6:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [6:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [6:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [6:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_0; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [6:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
reg [12:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [9:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
reg [12:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [2:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire req_clientBit = request_source == 7'h40; // @[Parameters.scala:46:9]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106]
wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:46:9]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire probe_bit = io_sinkc_bits_source_0 == 7'h40; // @[Parameters.scala:46:9]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66]
wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [6:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire new_clientBit = new_request_source == 7'h40; // @[Parameters.scala:46:9]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
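// A minimal usage sketch (an illustrative addition, not part of the upstream file). It assumes an
// enclosing design in which `clientNode` (a TLClientNode) and `managerNode` (a TLManagerNode)
// already exist with compatible port counts; TLBuffer() returns a TLNode that is spliced directly
// into the diplomatic binding between them, and TLBuffer.chainNode would compose several stages in series.
class TLBufferUsageSketch(clientNode: TLClientNode, managerNode: TLManagerNode)(implicit p: Parameters) extends LazyModule
{
  // one buffer stage on every TileLink channel between the client and the manager
  managerNode := TLBuffer(BufferParams.default) := clientNode
  lazy val module = new LazyModuleImp(this) { }
}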
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
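// A minimal sketch (an illustrative addition, not part of the upstream file) of how an adapter
// node defined above is typically used: a LazyModule owns the node, and its module implementation
// wires each inward TileLink bundle to the matching outward bundle. The class name is an assumption;
// a real adapter would also transform the diplomatic parameters via clientFn/managerFn.
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
class TLPassthroughSketch(implicit p: Parameters) extends LazyModule
{
  val node = TLAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    (node.in zip node.out) foreach { case ((in, _), (out, _)) =>
      out.a <> in.a
      in.d <> out.d
      in.b <> out.b
      out.c <> in.c
      out.e <> in.e
    }
  }
}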
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
* its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
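// A minimal sketch (an illustrative addition, not part of the upstream file) of the pattern the
// childClock/childReset comments above describe: a raw-module island that receives an explicit
// clock and reset through IO and forwards them to its lazy children. The class and port names
// are assumptions.
class RawClockIslandSketch(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    // opt in to clocking the lazily-instantiated children from childClock/childReset
    override def provideImplicitClockToLazyChildren = true
    val io_clock = IO(Input(Clock()))
    val io_reset = IO(Input(Reset()))
    childClock := io_clock
    childReset := io_reset
  }
}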
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** Metadata for one side of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* whether the direction is flipped in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`; if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed onward
* - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
  // Generate a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
  /** Debug info of node bindings. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
  /** Debug info of connected ports. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
  /** Debug info of parameter propagation. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
   * Given counts of known inward and outward bindings and of inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
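  // Editor's note: the sketch below is an illustration only and is not part of the original source.
  // A hypothetical 1:1 adapter-style node could resolve stars by letting the starred side absorb
  // exactly as many edges as the fully-specified opposite side provides:
  //
  //   protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
  //     require(iStars + oStars <= 1, "a 1:1 adapter can only have a star on one side")
  //     if      (iStars > 0) (oKnown - iKnown, 0) // the inward star soaks up the extra outward edges
  //     else if (oStars > 0) (0, iKnown - oKnown) // the outward star soaks up the extra inward edges
  //     else { require(iKnown == oKnown); (0, 0) }
  //   }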
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
  /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
   *   An n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
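  // Editor's note: illustration only, not part of the original source. A hypothetical single-input
  // broadcast-style node could replicate its one downward parameter to all `n` outputs and merge the
  // upward parameters of every output into its single input; `convertD` and `mergeU` stand in for
  // protocol-specific helpers assumed for this sketch:
  //
  //   protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] = Seq.fill(n)(convertD(p.head))
  //   protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] = Seq.fill(n)(mergeU(p))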
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
      * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice while resolving `:*=` and similar operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // Resolving the stars depends on the node subclass, which implements the algorithm in resolveStar.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
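  // Editor's worked example (illustration only, not part of the original source): if a node has two
  // BIND_ONCE outward bindings followed by one BIND_STAR outward binding that resolves to oStar = 3
  // edges, the per-binding widths are Seq(1, 1, 3), so
  //   oSum         = Seq(0, 1, 2, 5)             // scanLeft(0)(_ + _)
  //   oPortMapping = Seq((0, 1), (1, 2), (2, 5)) // oSum.init zip oSum.tail
  // i.e. the star binding owns edge indices 2, 3 and 4.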
  /** Sequence of outward ports, before port forwarding is applied.
    *
    * This should be called after all star bindings are resolved.
    *
    * Each element is: `j` Port index of this binding in the [[InwardNode.iPortMapping]] of the node on the other side
    * of the binding. `n` Instance of the [[InwardNode]] on the other side. `p` View of [[Parameters]] where this
    * connection was made. `s` Source info where this connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports, before port forwarding is applied.
    *
    * This should be called after all star bindings are resolved.
    *
    * `j` Port index of this binding in the [[OutwardNode.oPortMapping]] of the node on the other side of the binding.
    * `n` Instance of the [[OutwardNode]] on the other side. `p` View of [[Parameters]] where this connection was made.
    * `s` [[SourceInfo]] where this connection was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree.
  // Thus, there must exist an Eulerian path and the algorithms below terminate.
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: unconnected forwarded diplomatic signals are driven to DontCare for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: unconnected forwarded diplomatic signals are driven to DontCare for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
module TLBuffer_a26d64s11k1z2u( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [25:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [25:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [1:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [10:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [25:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [10:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire [1:0] auto_out_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire [1:0] nodeOut_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [1:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [10:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [25:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [10:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [1:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [10:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [25:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [10:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [10:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [1:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [10:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [25:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
TLMonitor_23 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue1_TLBundleA_a26d64s11k1z2u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleD_a26d64s11k1z2u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
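// Editor's note: illustrative usage sketch, not part of the original file; `a_valid`, `b_ready`
// and `meta_ready` are hypothetical condition signals.
//
//   val helper = DecoupledHelper(a_valid, b_ready, meta_ready)
//   // fire(x) ANDs together every condition except x, so each handshake output can be driven
//   // without combinationally depending on itself:
//   b_valid := helper.fire(b_ready)
//   a_ready := helper.fire(a_valid)
//   val do_work = helper.fire() // all conditions hold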
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
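// Editor's note: illustrative usage sketch, not part of the original file. It looks up a
// (Bool, UInt) pair keyed by a hypothetical `cmd` signal, falling back to the default:
//
//   val (hit, next_state) = MuxTLookup(cmd, (false.B, 0.U), Seq(
//     1.U -> (true.B, 2.U),
//     2.U -> (true.B, 3.U)))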
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
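// Editor's note (illustration, not part of the original file; `a`, `b`, `c` are hypothetical Bools):
// for three inputs the threshold is (3 >> 1) + 1 = 2, so the vote below is true whenever at least
// two of a, b, c are true.
//
//   val vote = Majority(Seq(a, b, c))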
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
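// Editor's note: illustrative usage sketch, not part of the original file; `addr` is a hypothetical
// address signal. For an 8-byte beat, a 2-byte access (lgSize = 1) whose low address bits are 2
// enables the byte lanes for bytes 2 and 3 of the beat:
//
//   val mask = MaskGen(addr(2, 0), 1.U, beatBytes = 8)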
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
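// Editor's note: illustrative usage sketch, not part of the original file; `head` is a hypothetical
// pointer known to be < 3. addWrap gives a wrapping increment without a full modulo:
//
//   val next_head = head.addWrap(1.U, 3) // 0, 1, 2, 0, 1, ...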
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
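// Editor's worked example (not part of the original file):
//   leftOR("b0010".U(4.W))  === "b1110".U // 1s propagate towards the high bits
//   rightOR("b0100".U(4.W)) === "b0111".U // 1s propagate towards the low bits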
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
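// Editor's worked example (not part of the original file): keys appear in order of first
// occurrence, so
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) == Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4))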
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Metadata.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.constants.MemoryOpConstants
import freechips.rocketchip.util._
object ClientStates {
val width = 2
def Nothing = 0.U(width.W)
def Branch = 1.U(width.W)
def Trunk = 2.U(width.W)
def Dirty = 3.U(width.W)
def hasReadPermission(state: UInt): Bool = state > Nothing
def hasWritePermission(state: UInt): Bool = state > Branch
}
object MemoryOpCategories extends MemoryOpConstants {
def wr = Cat(true.B, true.B) // Op actually writes
def wi = Cat(false.B, true.B) // Future op will write
def rd = Cat(false.B, false.B) // Op only reads
def categorize(cmd: UInt): UInt = {
val cat = Cat(isWrite(cmd), isWriteIntent(cmd))
//assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.")
cat
}
}
/** Stores the client-side coherence information,
* such as permissions on the data and whether the data is dirty.
* Its API can be used to make TileLink messages in response to
  * memory operations, cache control operations, or Probe messages.
*/
class ClientMetadata extends Bundle {
/** Actual state information stored in this bundle */
val state = UInt(ClientStates.width.W)
/** Metadata equality */
def ===(rhs: UInt): Bool = state === rhs
def ===(rhs: ClientMetadata): Bool = state === rhs.state
def =/=(rhs: ClientMetadata): Bool = !this.===(rhs)
/** Is the block's data present in this cache */
def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing
/** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */
private def growStarter(cmd: UInt): (Bool, UInt) = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
MuxTLookup(Cat(c, state), (false.B, 0.U), Seq(
//(effect, am now) -> (was a hit, next)
Cat(rd, Dirty) -> (true.B, Dirty),
Cat(rd, Trunk) -> (true.B, Trunk),
Cat(rd, Branch) -> (true.B, Branch),
Cat(wi, Dirty) -> (true.B, Dirty),
Cat(wi, Trunk) -> (true.B, Trunk),
Cat(wr, Dirty) -> (true.B, Dirty),
Cat(wr, Trunk) -> (true.B, Dirty),
//(effect, am now) -> (was a miss, param)
Cat(rd, Nothing) -> (false.B, NtoB),
Cat(wi, Branch) -> (false.B, BtoT),
Cat(wi, Nothing) -> (false.B, NtoT),
Cat(wr, Branch) -> (false.B, BtoT),
Cat(wr, Nothing) -> (false.B, NtoT)))
}
/** Determine what state to go to after miss based on Grant param
* For now, doesn't depend on state (which may have been Probed).
*/
private def growFinisher(cmd: UInt, param: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
//assert(c === rd || param === toT, "Client was expecting trunk permissions.")
MuxLookup(Cat(c, param), Nothing)(Seq(
//(effect param) -> (next)
Cat(rd, toB) -> Branch,
Cat(rd, toT) -> Trunk,
Cat(wi, toT) -> Trunk,
Cat(wr, toT) -> Dirty))
}
/** Does this cache have permissions on this block sufficient to perform op,
* and what to do next (Acquire message param or updated metadata). */
def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = growStarter(cmd)
(r._1, r._2, ClientMetadata(r._2))
}
/** Does a secondary miss on the block require another Acquire message */
def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = {
import MemoryOpCategories._
val r1 = growStarter(first_cmd)
val r2 = growStarter(second_cmd)
val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd)
val hit_again = r1._1 && r2._1
val dirties = categorize(second_cmd) === wr
val biggest_grow_param = Mux(dirties, r2._2, r1._2)
val dirtiest_state = ClientMetadata(biggest_grow_param)
val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd)
(needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd)
}
/** Metadata change on a returned Grant */
def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param))
/** Determine what state to go to based on Probe param */
private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = {
import ClientStates._
import TLPermissions._
MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq(
      //(wanted, am now) -> (hasDirtyData, resp param, next state)
Cat(toT, Dirty) -> (true.B, TtoT, Trunk),
Cat(toT, Trunk) -> (false.B, TtoT, Trunk),
Cat(toT, Branch) -> (false.B, BtoB, Branch),
Cat(toT, Nothing) -> (false.B, NtoN, Nothing),
Cat(toB, Dirty) -> (true.B, TtoB, Branch),
Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade
Cat(toB, Branch) -> (false.B, BtoB, Branch),
Cat(toB, Nothing) -> (false.B, NtoN, Nothing),
Cat(toN, Dirty) -> (true.B, TtoN, Nothing),
Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Nothing) -> (false.B, NtoN, Nothing)))
}
/** Translate cache control cmds into Probe param */
private def cmdToPermCap(cmd: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
MuxLookup(cmd, toN)(Seq(
M_FLUSH -> toN,
M_PRODUCE -> toB,
M_CLEAN -> toT))
}
def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(cmdToPermCap(cmd))
(r._1, r._2, ClientMetadata(r._3))
}
def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(param)
(r._1, r._2, ClientMetadata(r._3))
}
}
/** Factories for ClientMetadata, including on reset */
object ClientMetadata {
def apply(perm: UInt) = {
val meta = Wire(new ClientMetadata)
meta.state := perm
meta
}
def onReset = ClientMetadata(ClientStates.Nothing)
def maximum = ClientMetadata(ClientStates.Dirty)
}
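// Editor's note: illustrative usage sketch, not part of the original file. A cache pipeline might
// consult the stored metadata on a load (M_XRD) to decide between hit and miss:
//
//   val coh = ClientMetadata.onReset                   // block not present: state = Nothing
//   val (is_hit, param, new_coh) = coh.onAccess(M_XRD)
//   // is_hit is false here; `param` (NtoB) would be the grow-permissions parameter of the Acquire
//   // sent on the miss, while on a hit `new_coh` would be written back to the tag array.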
File dcache.scala:
//******************************************************************************
// Ported from Rocket-Chip
// See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.lsu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._
import boom.v4.common._
import boom.v4.exu.BrUpdateInfo
import boom.v4.util._
class BoomWritebackUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new WritebackReq(edge.bundle)))
val meta_read = Decoupled(new L1MetaReadReq)
val resp = Output(Bool())
val idx = Output(Valid(UInt()))
val data_req = Decoupled(new L1DataReadReq)
val data_resp = Input(UInt(encRowBits.W))
val mem_grant = Input(Bool())
val release = Decoupled(new TLBundleC(edge.bundle))
val lsu_release = Decoupled(new TLBundleC(edge.bundle))
})
val req = Reg(new WritebackReq(edge.bundle))
val s_invalid :: s_fill_buffer :: s_lsu_release :: s_active :: s_grant :: Nil = Enum(5)
val state = RegInit(s_invalid)
val r1_data_req_fired = RegInit(false.B)
val r2_data_req_fired = RegInit(false.B)
val r1_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W))
val r2_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W))
val data_req_cnt = RegInit(0.U(log2Up(refillCycles+1).W))
val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release)
val wb_buffer = Reg(Vec(refillCycles, UInt(encRowBits.W)))
val acked = RegInit(false.B)
io.idx.valid := state =/= s_invalid
io.idx.bits := req.idx
io.release.valid := false.B
io.release.bits := DontCare
io.req.ready := false.B
io.meta_read.valid := false.B
io.meta_read.bits := DontCare
io.data_req.valid := false.B
io.data_req.bits := DontCare
io.resp := false.B
io.lsu_release.valid := false.B
io.lsu_release.bits := DontCare
val r_address = Cat(req.tag, req.idx) << blockOffBits
val id = cfg.nMSHRs
val probeResponse = edge.ProbeAck(
fromSource = req.source,
toAddress = r_address,
lgSize = lgCacheBlockBytes.U,
reportPermissions = req.param,
data = wb_buffer(data_req_cnt))
val voluntaryRelease = edge.Release(
fromSource = id.U,
toAddress = r_address,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = req.param,
data = wb_buffer(data_req_cnt))._2
when (state === s_invalid) {
io.req.ready := true.B
when (io.req.fire) {
state := s_fill_buffer
data_req_cnt := 0.U
req := io.req.bits
acked := false.B
}
} .elsewhen (state === s_fill_buffer) {
io.meta_read.valid := data_req_cnt < refillCycles.U
io.meta_read.bits.idx := req.idx
io.meta_read.bits.tag := req.tag
io.data_req.valid := data_req_cnt < refillCycles.U
io.data_req.bits.way_en := req.way_en
io.data_req.bits.addr := (if(refillCycles > 1)
Cat(req.idx, data_req_cnt(log2Up(refillCycles)-1,0))
else req.idx) << rowOffBits
r1_data_req_fired := false.B
r1_data_req_cnt := 0.U
r2_data_req_fired := r1_data_req_fired
r2_data_req_cnt := r1_data_req_cnt
when (io.data_req.fire && io.meta_read.fire) {
r1_data_req_fired := true.B
r1_data_req_cnt := data_req_cnt
data_req_cnt := data_req_cnt + 1.U
}
when (r2_data_req_fired) {
wb_buffer(r2_data_req_cnt) := io.data_resp
when (r2_data_req_cnt === (refillCycles-1).U) {
io.resp := true.B
state := s_lsu_release
data_req_cnt := 0.U
}
}
} .elsewhen (state === s_lsu_release) {
io.lsu_release.valid := true.B
io.lsu_release.bits := probeResponse
when (io.lsu_release.fire) {
state := s_active
}
} .elsewhen (state === s_active) {
io.release.valid := data_req_cnt < refillCycles.U
io.release.bits := Mux(req.voluntary, voluntaryRelease, probeResponse)
when (io.mem_grant) {
acked := true.B
}
when (io.release.fire) {
data_req_cnt := data_req_cnt + 1.U
}
when ((data_req_cnt === (refillCycles-1).U) && io.release.fire) {
state := Mux(req.voluntary, s_grant, s_invalid)
}
} .elsewhen (state === s_grant) {
when (io.mem_grant) {
acked := true.B
}
when (acked) {
state := s_invalid
}
}
}
class BoomProbeUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new TLBundleB(edge.bundle)))
val rep = Decoupled(new TLBundleC(edge.bundle))
val meta_read = Decoupled(new L1MetaReadReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val wb_req = Decoupled(new WritebackReq(edge.bundle))
val way_en = Input(UInt(nWays.W))
val wb_rdy = Input(Bool()) // Is the writeback unit currently busy? If so, we need to retry the meta read when it's done
val mshr_rdy = Input(Bool()) // Is MSHR ready for this request to proceed?
val mshr_wb_rdy = Output(Bool()) // Should we block MSHR writebacks while we finish our own?
val block_state = Input(new ClientMetadata())
val lsu_release = Decoupled(new TLBundleC(edge.bundle))
val state = Output(Valid(UInt(coreMaxAddrBits.W)))
})
val (s_invalid :: s_meta_read :: s_meta_resp :: s_mshr_req ::
s_mshr_resp :: s_lsu_release :: s_release :: s_writeback_req :: s_writeback_resp ::
s_meta_write :: s_meta_write_resp :: Nil) = Enum(11)
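// Probe FSM: s_invalid accepts a Probe from channel B; s_meta_read/s_meta_resp look up the
// tags; s_mshr_req latches the hit way and coherence state, retrying the lookup if the MSHRs
// or the writeback unit are busy; s_mshr_resp chooses a path: a dirty hit goes through
// s_writeback_req/s_writeback_resp so the writeback unit can send ProbeAckData, otherwise
// s_lsu_release notifies the LSU and s_release sends a dataless ProbeAck on channel C;
// finally s_meta_write/s_meta_write_resp update the coherence metadata (on a tag match)
// before returning to s_invalid.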
val state = RegInit(s_invalid)
val req = Reg(new TLBundleB(edge.bundle))
val req_idx = req.address(idxMSB, idxLSB)
val req_tag = req.address >> untagBits
val way_en = Reg(UInt())
val tag_matches = way_en.orR
val old_coh = Reg(new ClientMetadata)
val miss_coh = ClientMetadata.onReset
val reply_coh = Mux(tag_matches, old_coh, miss_coh)
val (is_dirty, report_param, new_coh) = reply_coh.onProbe(req.param)
io.state.valid := state =/= s_invalid
io.state.bits := req.address
io.req.ready := state === s_invalid
io.rep.valid := state === s_release
io.rep.bits := edge.ProbeAck(req, report_param)
assert(!io.rep.valid || !edge.hasData(io.rep.bits),
"ProbeUnit should not send ProbeAcks with data, WritebackUnit should handle it")
io.meta_read.valid := state === s_meta_read
io.meta_read.bits.idx := req_idx
io.meta_read.bits.tag := req_tag
io.meta_read.bits.way_en := ~(0.U(nWays.W))
io.meta_write.valid := state === s_meta_write
io.meta_write.bits.way_en := way_en
io.meta_write.bits.idx := req_idx
io.meta_write.bits.tag := req_tag
io.meta_write.bits.data.tag := req_tag
io.meta_write.bits.data.coh := new_coh
io.wb_req.valid := state === s_writeback_req
io.wb_req.bits.source := req.source
io.wb_req.bits.idx := req_idx
io.wb_req.bits.tag := req_tag
io.wb_req.bits.param := report_param
io.wb_req.bits.way_en := way_en
io.wb_req.bits.voluntary := false.B
io.mshr_wb_rdy := !state.isOneOf(s_release, s_writeback_req, s_writeback_resp, s_meta_write, s_meta_write_resp)
io.lsu_release.valid := state === s_lsu_release
io.lsu_release.bits := edge.ProbeAck(req, report_param)
// state === s_invalid
when (state === s_invalid) {
when (io.req.fire) {
state := s_meta_read
req := io.req.bits
}
} .elsewhen (state === s_meta_read) {
when (io.meta_read.fire) {
state := s_meta_resp
}
} .elsewhen (state === s_meta_resp) {
// we need to wait one cycle for the metadata to be read from the array
state := s_mshr_req
} .elsewhen (state === s_mshr_req) {
old_coh := io.block_state
way_en := io.way_en
// if the read didn't go through, we need to retry
state := Mux(io.mshr_rdy && io.wb_rdy, s_mshr_resp, s_meta_read)
} .elsewhen (state === s_mshr_resp) {
state := Mux(tag_matches && is_dirty, s_writeback_req, s_lsu_release)
} .elsewhen (state === s_lsu_release) {
when (io.lsu_release.fire) {
state := s_release
}
} .elsewhen (state === s_release) {
when (io.rep.ready) {
state := Mux(tag_matches, s_meta_write, s_invalid)
}
} .elsewhen (state === s_writeback_req) {
when (io.wb_req.fire) {
state := s_writeback_resp
}
} .elsewhen (state === s_writeback_resp) {
// wait for the writeback request to finish before updating the metadata
when (io.wb_req.ready) {
state := s_meta_write
}
} .elsewhen (state === s_meta_write) {
when (io.meta_write.fire) {
state := s_meta_write_resp
}
} .elsewhen (state === s_meta_write_resp) {
state := s_invalid
}
}
class BoomL1MetaReadReq(implicit p: Parameters) extends BoomBundle()(p) {
val req = Vec(lsuWidth, new L1MetaReadReq)
}
class BoomL1DataReadReq(implicit p: Parameters) extends BoomBundle()(p) {
val req = Vec(lsuWidth, new L1DataReadReq)
val valid = Vec(lsuWidth, Bool())
}
abstract class AbstractBoomDataArray(implicit p: Parameters) extends BoomModule with HasL1HellaCacheParameters {
val io = IO(new BoomBundle {
val read = Input(Vec(lsuWidth, Valid(new L1DataReadReq)))
val write = Input(Valid(new L1DataWriteReq))
val resp = Output(Vec(lsuWidth, Vec(nWays, Bits(encRowBits.W))))
val s1_nacks = Output(Vec(lsuWidth, Bool()))
})
def pipeMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
}
class BoomDuplicatedDataArray(implicit p: Parameters) extends AbstractBoomDataArray
{
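// One SRAM copy per (way, LSU port): every port can read any way every cycle, so reads never
// bank-conflict and s1_nacks is tied low; the cost is duplicating the data storage lsuWidth times.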
val waddr = io.write.bits.addr >> rowOffBits
for (j <- 0 until lsuWidth) {
val raddr = io.read(j).bits.addr >> rowOffBits
for (w <- 0 until nWays) {
val array = DescribedSRAM(
name = s"array_${w}_${j}",
desc = "Non-blocking DCache Data Array",
size = nSets * refillCycles,
data = Vec(rowWords, Bits(encDataBits.W))
)
when (io.write.bits.way_en(w) && io.write.valid) {
val data = VecInit((0 until rowWords) map (i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)))
array.write(waddr, data, io.write.bits.wmask.asBools)
}
if (dcacheSinglePorted)
io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).bits.way_en(w) && io.read(j).valid).asUInt)
else
io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).valid).asUInt)
}
io.s1_nacks(j) := false.B
}
}
class BoomBankedDataArray(implicit p: Parameters) extends AbstractBoomDataArray {
val nBanks = boomParams.numDCacheBanks
val bankSize = nSets * refillCycles / nBanks
require (nBanks >= lsuWidth)
require (bankSize > 0)
val bankBits = log2Ceil(nBanks)
val bankOffBits = log2Ceil(rowWords) + log2Ceil(wordBytes)
val bidxBits = log2Ceil(bankSize)
val bidxOffBits = bankOffBits + bankBits
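// Address decomposition for the banked array (low to high): row offset [bankOffBits-1:0],
// bank select [bidxOffBits-1:bankOffBits], then the index within the bank (bidxBits bits).
// For example (assumed parameters, not from this file): 4 banks, rowWords = 2, wordBytes = 8
// give bankOffBits = 4, bankBits = 2, bidxOffBits = 6, so addr(5,4) picks the bank and
// addr(6+bidxBits-1, 6) picks the entry within it.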
//----------------------------------------------------------------------------------------------------
val s0_rbanks = if (nBanks > 1) VecInit(io.read.map(r => (r.bits.addr >> bankOffBits)(bankBits-1,0))) else VecInit(0.U)
val s0_wbank = if (nBanks > 1) (io.write.bits.addr >> bankOffBits)(bankBits-1,0) else 0.U
val s0_ridxs = VecInit(io.read.map(r => (r.bits.addr >> bidxOffBits)(bidxBits-1,0)))
val s0_widx = (io.write.bits.addr >> bidxOffBits)(bidxBits-1,0)
val s0_read_valids = VecInit(io.read.map(_.valid))
val s0_bank_conflicts = pipeMap(w => {
((s0_rbanks(w) === s0_wbank) && io.write.valid && dcacheSinglePorted.B) ||
(0 until w).foldLeft(false.B)((c,i) => c || io.read(i).valid && s0_rbanks(i) === s0_rbanks(w))
})
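// A read on port w loses its bank if either (a) the write port targets the same bank this
// cycle and the arrays are single-ported, or (b) a lower-numbered (older) read port already
// claimed that bank. In s1, a conflicted read is nacked unless the winning port happened to
// read the same bank index, in which case its data can be shared.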
val s0_do_bank_read = s0_read_valids zip s0_bank_conflicts map {case (v,c) => v && !c}
val s0_bank_read_gnts = Transpose(VecInit(s0_rbanks zip s0_do_bank_read map {case (b,d) => VecInit((UIntToOH(b) & Fill(nBanks,d)).asBools)}))
val s0_bank_write_gnt = (UIntToOH(s0_wbank) & Fill(nBanks, io.write.valid)).asBools
//----------------------------------------------------------------------------------------------------
val s1_rbanks = RegNext(s0_rbanks)
val s1_ridxs = RegNext(s0_ridxs)
val s1_read_valids = RegNext(s0_read_valids)
val s1_pipe_selection = pipeMap(i => VecInit(PriorityEncoderOH(pipeMap(j =>
if (j < i) s1_read_valids(j) && s1_rbanks(j) === s1_rbanks(i)
else if (j == i) true.B else false.B))))
val s1_ridx_match = pipeMap(i => pipeMap(j => if (j < i) s1_ridxs(j) === s1_ridxs(i)
else if (j == i) true.B else false.B))
val s1_nacks = pipeMap(w => s1_read_valids(w)
&& (!RegNext(s0_do_bank_read(w)) || (s1_pipe_selection(w).asUInt & ~s1_ridx_match(w).asUInt).orR)
)
val s1_bank_selection = pipeMap(w => Mux1H(s1_pipe_selection(w), s1_rbanks))
//----------------------------------------------------------------------------------------------------
val s2_bank_selection = RegNext(s1_bank_selection)
io.s1_nacks := s1_nacks
val data_arrays = Seq.tabulate(nBanks) {
b => DescribedSRAM(
name = s"array_${b}",
desc = "Boom DCache data array",
size = bankSize,
data = Vec(nWays * rowWords, Bits(encDataBits.W))
)
}
val s2_bank_reads = Reg(Vec(nBanks, Vec(nWays, Bits(encRowBits.W))))
for (b <- 0 until nBanks) {
val array = data_arrays(b)
val ridx = Mux1H(s0_bank_read_gnts(b), s0_ridxs)
val way_en = Mux1H(s0_bank_read_gnts(b), io.read.map(_.bits.way_en))
val write_en = s0_bank_write_gnt(b)
val write_mask = Cat(Seq.tabulate(nWays) { w =>
Mux(io.write.bits.way_en(w), io.write.bits.wmask, 0.U(rowWords.W))
}.reverse).asBools
val read_en = WireInit(s0_bank_read_gnts(b).reduce(_||_))
s2_bank_reads(b) := (if (dcacheSinglePorted) {
assert(!(read_en && write_en))
array.read(ridx, !write_en && read_en)
} else {
array.read(ridx, read_en)
}).asTypeOf(Vec(nWays, Bits(encRowBits.W)))
when (write_en) {
val data = Wire(Vec(nWays * rowWords, Bits(encDataBits.W)))
for (w <- 0 until nWays) {
for (i <- 0 until rowWords) {
data(w*rowWords+i) := io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)
}
}
array.write(s0_widx, data, write_mask)
}
}
for (w <- 0 until nWays) {
for (i <- 0 until lsuWidth) {
io.resp(i)(w) := s2_bank_reads(s2_bank_selection(i))(w)
}
}
}
/**
* Top level class wrapping a non-blocking dcache.
*
 * @param staticIdForMetadataUseOnly static hart ID, used only for metadata (e.g. naming the TileLink clients)
*/
class BoomNonBlockingDCache(staticIdForMetadataUseOnly: Int)(implicit p: Parameters) extends LazyModule
{
private val tileParams = p(TileKey)
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${staticIdForMetadataUseOnly} DCache",
sourceId = IdRange(0, 1 max (cfg.nMSHRs + 1)),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${staticIdForMetadataUseOnly} DCache MMIO",
sourceId = IdRange(cfg.nMSHRs + 1, cfg.nMSHRs + 1 + cfg.nMMIOs),
requestFifo = true))
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
cacheClientParameters ++ mmioClientParameters,
minLatency = 1)))
lazy val module = new BoomNonBlockingDCacheModule(this)
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireT || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class BoomDCacheBundle(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p) {
val errors = new DCacheErrors
val lsu = Flipped(new LSUDMemIO)
}
class BoomNonBlockingDCacheModule(outer: BoomNonBlockingDCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters
with HasBoomCoreParameters
{
implicit val edge = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new BoomDCacheBundle)
io.errors := DontCare
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees ${m.nodePath.map(_.name)}")
}
def widthMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
val t_replay :: t_probe :: t_wb :: t_mshr_meta_read :: t_lsu :: t_prefetch :: Nil = Enum(6)
val wb = Module(new BoomWritebackUnit)
val prober = Module(new BoomProbeUnit)
val mshrs = Module(new BoomMSHRFile)
mshrs.io.clear_all := io.lsu.force_order
mshrs.io.brupdate := io.lsu.brupdate
mshrs.io.exception := io.lsu.exception
mshrs.io.rob_pnr_idx := io.lsu.rob_pnr_idx
mshrs.io.rob_head_idx := io.lsu.rob_head_idx
// tags
def onReset = L1Metadata(0.U, ClientMetadata.onReset)
val meta = Seq.fill(lsuWidth) { Module(new L1MetadataArray(onReset _)) }
val metaWriteArb = Module(new Arbiter(new L1MetaWriteReq, 2))
// 0 goes to MSHR refills, 1 goes to prober
val metaReadArb = Module(new Arbiter(new BoomL1MetaReadReq, 6))
// 0 goes to MSHR replays, 1 goes to prober, 2 goes to wb, 3 goes to MSHR meta read,
// 4 goes to pipeline, 5 goes to prefetcher
metaReadArb.io.in := DontCare
for (w <- 0 until lsuWidth) {
meta(w).io.write.valid := metaWriteArb.io.out.fire
meta(w).io.write.bits := metaWriteArb.io.out.bits
meta(w).io.read.valid := metaReadArb.io.out.valid
meta(w).io.read.bits := metaReadArb.io.out.bits.req(w)
}
metaReadArb.io.out.ready := meta.map(_.io.read.ready).reduce(_||_)
metaWriteArb.io.out.ready := meta.map(_.io.write.ready).reduce(_||_)
// data
val data = Module(if (boomParams.numDCacheBanks == 1) new BoomDuplicatedDataArray else new BoomBankedDataArray)
val dataWriteArb = Module(new Arbiter(new L1DataWriteReq, 2))
// 0 goes to pipeline, 1 goes to MSHR refills
val dataReadArb = Module(new Arbiter(new BoomL1DataReadReq, 3))
// 0 goes to MSHR replays, 1 goes to wb, 2 goes to pipeline
dataReadArb.io.in := DontCare
for (w <- 0 until lsuWidth) {
data.io.read(w).valid := dataReadArb.io.out.bits.valid(w) && dataReadArb.io.out.valid
data.io.read(w).bits := dataReadArb.io.out.bits.req(w)
}
dataReadArb.io.out.ready := true.B
data.io.write.valid := dataWriteArb.io.out.fire
data.io.write.bits := dataWriteArb.io.out.bits
dataWriteArb.io.out.ready := true.B
val singlePortedDCacheWrite = data.io.write.valid && dcacheSinglePorted.B
// ------------
// New requests
// In a 1-wide LSU, load/store wakeups and MSHR responses contend for the same port, so
// we block incoming requests while the MSHR is trying to respond
val block_incoming_reqs = (lsuWidth == 1).B && mshrs.io.resp.valid
io.lsu.req.ready := metaReadArb.io.in(4).ready && dataReadArb.io.in(2).ready && !block_incoming_reqs
metaReadArb.io.in(4).valid := io.lsu.req.valid && !block_incoming_reqs
dataReadArb.io.in(2).valid := io.lsu.req.valid && !block_incoming_reqs
for (w <- 0 until lsuWidth) {
// Tag read for new requests
metaReadArb.io.in(4).bits.req(w).idx := io.lsu.req.bits(w).bits.addr >> blockOffBits
metaReadArb.io.in(4).bits.req(w).way_en := DontCare
metaReadArb.io.in(4).bits.req(w).tag := DontCare
// Data read for new requests
dataReadArb.io.in(2).bits.valid(w) := io.lsu.req.bits(w).valid
dataReadArb.io.in(2).bits.req(w).addr := io.lsu.req.bits(w).bits.addr
dataReadArb.io.in(2).bits.req(w).way_en := ~0.U(nWays.W)
}
// ------------
// MSHR Replays
val replay_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
replay_req := DontCare
replay_req(0).uop := mshrs.io.replay.bits.uop
replay_req(0).addr := mshrs.io.replay.bits.addr
replay_req(0).data := mshrs.io.replay.bits.data
replay_req(0).is_hella := mshrs.io.replay.bits.is_hella
// Don't let replays get nacked due to conflict with dcache write
mshrs.io.replay.ready := metaReadArb.io.in(0).ready && dataReadArb.io.in(0).ready && !singlePortedDCacheWrite
// Tag read for MSHR replays
// We don't actually need to read the metadata; for replays we already know our way
metaReadArb.io.in(0).valid := mshrs.io.replay.valid && !singlePortedDCacheWrite
metaReadArb.io.in(0).bits.req(0).idx := mshrs.io.replay.bits.addr >> blockOffBits
metaReadArb.io.in(0).bits.req(0).way_en := DontCare
metaReadArb.io.in(0).bits.req(0).tag := DontCare
// Data read for MSHR replays
dataReadArb.io.in(0).valid := mshrs.io.replay.valid && !singlePortedDCacheWrite
dataReadArb.io.in(0).bits.req(0).addr := mshrs.io.replay.bits.addr
dataReadArb.io.in(0).bits.req(0).way_en := mshrs.io.replay.bits.way_en
dataReadArb.io.in(0).bits.valid := widthMap(w => (w == 0).B)
// -----------
// MSHR Meta read
val mshr_read_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
mshr_read_req := DontCare
mshr_read_req(0).uop := NullMicroOp
mshr_read_req(0).addr := Cat(mshrs.io.meta_read.bits.tag, mshrs.io.meta_read.bits.idx) << blockOffBits
mshr_read_req(0).data := DontCare
mshr_read_req(0).is_hella := false.B
metaReadArb.io.in(3).valid := mshrs.io.meta_read.valid
metaReadArb.io.in(3).bits.req(0) := mshrs.io.meta_read.bits
mshrs.io.meta_read.ready := metaReadArb.io.in(3).ready
// -----------
// Write-backs
val wb_fire = wb.io.meta_read.fire && wb.io.data_req.fire
val wb_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
wb_req := DontCare
wb_req(0).uop := NullMicroOp
wb_req(0).addr := Cat(wb.io.meta_read.bits.tag, wb.io.data_req.bits.addr)
wb_req(0).data := DontCare
wb_req(0).is_hella := false.B
// Couple the WB unit's two decoupled interfaces (meta_read and data_req) together
// Can't launch the data read if there is any possibility of a conflict with a write
// Tag read for write-back
metaReadArb.io.in(2).valid := wb.io.meta_read.valid && !singlePortedDCacheWrite
metaReadArb.io.in(2).bits.req(0) := wb.io.meta_read.bits
wb.io.meta_read.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready && !singlePortedDCacheWrite
// Data read for write-back
dataReadArb.io.in(1).valid := wb.io.data_req.valid && !singlePortedDCacheWrite
dataReadArb.io.in(1).bits.req(0) := wb.io.data_req.bits
dataReadArb.io.in(1).bits.valid := widthMap(w => (w == 0).B)
wb.io.data_req.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready && !singlePortedDCacheWrite
assert(!(wb.io.meta_read.fire ^ wb.io.data_req.fire))
// -------
// Prober
val prober_fire = prober.io.meta_read.fire
val prober_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
prober_req := DontCare
prober_req(0).uop := NullMicroOp
prober_req(0).addr := Cat(prober.io.meta_read.bits.tag, prober.io.meta_read.bits.idx) << blockOffBits
prober_req(0).data := DontCare
prober_req(0).is_hella := false.B
// Tag read for prober
metaReadArb.io.in(1).valid := prober.io.meta_read.valid
metaReadArb.io.in(1).bits.req(0) := prober.io.meta_read.bits
prober.io.meta_read.ready := metaReadArb.io.in(1).ready
// Prober does not need to read data array
// -------
// Prefetcher
val prefetch_fire = mshrs.io.prefetch.fire
val prefetch_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
prefetch_req := DontCare
prefetch_req(0) := mshrs.io.prefetch.bits
// Tag read for prefetch
metaReadArb.io.in(5).valid := mshrs.io.prefetch.valid
metaReadArb.io.in(5).bits.req(0).idx := mshrs.io.prefetch.bits.addr >> blockOffBits
metaReadArb.io.in(5).bits.req(0).way_en := DontCare
metaReadArb.io.in(5).bits.req(0).tag := DontCare
mshrs.io.prefetch.ready := metaReadArb.io.in(5).ready
// Prefetch does not need to read data array
val s0_valid = Mux(io.lsu.req.fire, VecInit(io.lsu.req.bits.map(_.valid)),
Mux(mshrs.io.replay.fire || wb_fire || prober_fire || prefetch_fire || mshrs.io.meta_read.fire,
VecInit(1.U(lsuWidth.W).asBools), VecInit(0.U(lsuWidth.W).asBools)))
val s0_req = Mux(io.lsu.req.fire , VecInit(io.lsu.req.bits.map(_.bits)),
Mux(wb_fire , wb_req,
Mux(prober_fire , prober_req,
Mux(prefetch_fire , prefetch_req,
Mux(mshrs.io.meta_read.fire, mshr_read_req
, replay_req)))))
val s0_type = Mux(io.lsu.req.fire , t_lsu,
Mux(wb_fire , t_wb,
Mux(prober_fire , t_probe,
Mux(prefetch_fire , t_prefetch,
Mux(mshrs.io.meta_read.fire, t_mshr_meta_read
, t_replay)))))
// Does this request need to send a response or nack
val s0_send_resp_or_nack = Mux(io.lsu.req.fire, s0_valid,
VecInit(Mux(mshrs.io.replay.fire && isRead(mshrs.io.replay.bits.uop.mem_cmd), 1.U(lsuWidth.W), 0.U(lsuWidth.W)).asBools))
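// Pipeline summary: s0 arbitrates between the six request sources and issues the tag/data
// array reads; s1 compares tags (or uses the pre-known way for replays, writebacks and MSHR
// meta reads) and collects early nack conditions; s2 resolves hit/miss, allocates MSHRs,
// and sends resp/nack/store_ack back to the LSU; s3-s5 perform the store/AMO data write and
// provide store-to-load bypassing into s2.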
val s1_req = RegNext(s0_req)
for (w <- 0 until lsuWidth)
s1_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s0_req(w).uop)
val s2_store_failed = Wire(Bool())
val s1_valid = widthMap(w =>
RegNext(s0_valid(w) &&
!IsKilledByBranch(io.lsu.brupdate, false.B, s0_req(w).uop) &&
!(io.lsu.exception && s0_req(w).uop.uses_ldq) &&
!(s2_store_failed && io.lsu.req.fire && s0_req(w).uop.uses_stq),
init=false.B))
for (w <- 0 until lsuWidth)
assert(!(io.lsu.s1_kill(w) && !RegNext(io.lsu.req.fire) && !RegNext(io.lsu.req.bits(w).valid)))
val s1_addr = s1_req.map(_.addr)
val s1_nack = s1_addr.map(a => a(idxMSB,idxLSB) === prober.io.meta_write.bits.idx && !prober.io.req.ready)
val s1_send_resp_or_nack = RegNext(s0_send_resp_or_nack)
val s1_type = RegNext(s0_type)
val s1_mshr_meta_read_way_en = RegNext(mshrs.io.meta_read.bits.way_en)
val s1_replay_way_en = RegNext(mshrs.io.replay.bits.way_en) // For replays, the metadata isn't written yet
val s1_wb_way_en = RegNext(wb.io.data_req.bits.way_en)
// tag check
def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
val s1_tag_eq_way = widthMap(i => wayMap((w: Int) => meta(i).io.resp(w).tag === (s1_addr(i) >> untagBits)).asUInt)
val s1_tag_match_way = widthMap(i =>
Mux(s1_type === t_replay, s1_replay_way_en,
Mux(s1_type === t_wb, s1_wb_way_en,
Mux(s1_type === t_mshr_meta_read, s1_mshr_meta_read_way_en,
wayMap((w: Int) => s1_tag_eq_way(i)(w) && meta(i).io.resp(w).coh.isValid()).asUInt))))
val s1_wb_idx_matches = widthMap(i => (s1_addr(i)(untagBits-1,blockOffBits) === wb.io.idx.bits) && wb.io.idx.valid)
for (w <- 0 until lsuWidth) {
io.lsu.s1_nack_advisory(w) := data.io.s1_nacks(w)
}
val s2_req = RegNext(s1_req)
val s2_type = RegNext(s1_type)
val s2_valid = widthMap(w =>
RegNext(s1_valid(w) &&
!io.lsu.s1_kill(w) &&
!IsKilledByBranch(io.lsu.brupdate, false.B, s1_req(w).uop) &&
!(io.lsu.exception && s1_req(w).uop.uses_ldq) &&
!(s2_store_failed && (s1_type === t_lsu) && s1_req(w).uop.uses_stq)))
for (w <- 0 until lsuWidth)
s2_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s1_req(w).uop)
val s2_tag_match_way = RegNext(s1_tag_match_way)
val s2_tag_match = s2_tag_match_way.map(_.orR)
val s2_hit_state = widthMap(i => Mux1H(s2_tag_match_way(i), wayMap((w: Int) => RegNext(meta(i).io.resp(w).coh))))
val s2_has_permission = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._1)
val s2_new_hit_state = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._3)
val s2_hit = widthMap(w => (s2_tag_match(w) && s2_has_permission(w) && s2_hit_state(w) === s2_new_hit_state(w) && !mshrs.io.block_hit(w)) || s2_type.isOneOf(t_replay, t_wb))
val s2_nack = Wire(Vec(lsuWidth, Bool()))
assert(!(s2_type === t_replay && !s2_hit(0)), "Replays should always hit")
assert(!(s2_type === t_wb && !s2_hit(0)), "Writeback should always see data hit")
val s2_wb_idx_matches = RegNext(s1_wb_idx_matches)
// lr/sc
val debug_sc_fail_addr = RegInit(0.U)
val debug_sc_fail_cnt = RegInit(0.U(8.W))
val lrsc_count = RegInit(0.U(log2Ceil(lrscCycles).W))
val lrsc_valid = lrsc_count > lrscBackoff.U
val lrsc_addr = Reg(UInt())
val s2_lr = s2_req(0).uop.mem_cmd === M_XLR && (!RegNext(s1_nack(0)) || s2_type === t_replay)
val s2_sc = s2_req(0).uop.mem_cmd === M_XSC && (!RegNext(s1_nack(0)) || s2_type === t_replay)
val s2_lrsc_addr_match = widthMap(w => lrsc_valid && lrsc_addr === (s2_req(w).addr >> blockOffBits))
val s2_sc_fail = s2_sc && !s2_lrsc_addr_match(0)
when (lrsc_count > 0.U) { lrsc_count := lrsc_count - 1.U }
when (s2_valid(0) && ((s2_type === t_lsu && s2_hit(0) && !s2_nack(0)) ||
(s2_type === t_replay && s2_req(0).uop.mem_cmd =/= M_FLUSH_ALL))) {
when (s2_lr) {
lrsc_count := (lrscCycles - 1).U
lrsc_addr := s2_req(0).addr >> blockOffBits
}
when (lrsc_count > 0.U) {
lrsc_count := 0.U
}
}
for (w <- 0 until lsuWidth) {
when (s2_valid(w) &&
s2_type === t_lsu &&
!s2_hit(w) &&
!(s2_has_permission(w) && s2_tag_match(w)) &&
s2_lrsc_addr_match(w) &&
!s2_nack(w)) {
lrsc_count := 0.U
}
}
when (s2_valid(0)) {
when (s2_req(0).addr === debug_sc_fail_addr) {
when (s2_sc_fail) {
debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
} .elsewhen (s2_sc) {
debug_sc_fail_cnt := 0.U
}
} .otherwise {
when (s2_sc_fail) {
debug_sc_fail_addr := s2_req(0).addr
debug_sc_fail_cnt := 1.U
}
}
}
assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row")
val s2_data = Wire(Vec(lsuWidth, Vec(nWays, UInt(encRowBits.W))))
for (i <- 0 until lsuWidth) {
for (w <- 0 until nWays) {
s2_data(i)(w) := data.io.resp(i)(w)
}
}
val s2_data_muxed = widthMap(w => Mux1H(s2_tag_match_way(w), s2_data(w)))
val s2_word_idx = widthMap(w => if (rowWords == 1) 0.U else s2_req(w).addr(log2Up(rowWords*wordBytes)-1, log2Up(wordBytes)))
// replacement policy
val replacer = cacheParams.replacement
val s1_replaced_way_en = UIntToOH(replacer.way)
val s2_replaced_way_en = UIntToOH(RegNext(replacer.way))
val s2_repl_meta = widthMap(i => Mux1H(s2_replaced_way_en, wayMap((w: Int) => RegNext(meta(i).io.resp(w))).toSeq))
// nack because of incoming probe
val s2_nack_hit = RegNext(VecInit(s1_nack))
// Nack when we hit something currently being evicted
val s2_nack_victim = widthMap(w => s2_valid(w) && s2_hit(w) && mshrs.io.secondary_miss(w))
// MSHRs not ready for request
val s2_nack_miss = widthMap(w => s2_valid(w) && !s2_hit(w) && !mshrs.io.req(w).ready)
// Bank conflict on data arrays
val s2_nack_data = widthMap(w => s2_valid(w) && RegNext(data.io.s1_nacks(w)))
// Can't allocate MSHR for same set currently being written back
val s2_nack_wb = widthMap(w => s2_valid(w) && !s2_hit(w) && s2_wb_idx_matches(w))
s2_nack := widthMap(w => (s2_nack_miss(w) || s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w) || s2_nack_wb(w)) && s2_type =/= t_replay)
assert(!(s2_nack_data.reduce(_||_) && s2_type.isOneOf(t_replay, t_wb)))
val s2_send_resp = widthMap(w => (
RegNext(s1_send_resp_or_nack(w)) &&
(!(s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w)) || s2_type === t_replay) &&
s2_hit(w) && isRead(s2_req(w).uop.mem_cmd)
))
val s2_send_store_ack = widthMap(w => (
RegNext(s1_send_resp_or_nack(w)) && !s2_nack(w) && isWrite(s2_req(w).uop.mem_cmd) &&
(s2_hit(w) || mshrs.io.req(w).fire)))
val s2_send_nack = widthMap(w => (RegNext(s1_send_resp_or_nack(w)) && s2_nack(w)))
for (w <- 0 until lsuWidth)
assert(!(s2_send_resp(w) && s2_send_nack(w)))
// hits always send a response
// If MSHR is not available, LSU has to replay this request later
// If an MSHR is available and this is only a store (not an AMO), we don't need to wait for a resp later
s2_store_failed := s2_valid(0) && s2_nack(0) && s2_send_nack(0) && s2_req(0).uop.uses_stq
// Miss handling
for (w <- 0 until lsuWidth) {
mshrs.io.req(w).valid := s2_valid(w) &&
!s2_hit(w) &&
!s2_nack_hit(w) &&
!s2_nack_victim(w) &&
!s2_nack_data(w) &&
!s2_nack_wb(w) &&
s2_type.isOneOf(t_lsu, t_prefetch) &&
!(io.lsu.exception && s2_req(w).uop.uses_ldq) &&
(isPrefetch(s2_req(w).uop.mem_cmd) ||
isRead(s2_req(w).uop.mem_cmd) ||
isWrite(s2_req(w).uop.mem_cmd))
assert(!(mshrs.io.req(w).valid && s2_type === t_replay), "Replays should not need to go back into MSHRs")
mshrs.io.req(w).bits := DontCare
mshrs.io.req(w).bits.uop := s2_req(w).uop
mshrs.io.req(w).bits.addr := s2_req(w).addr
mshrs.io.req(w).bits.tag_match := s2_tag_match(w)
mshrs.io.req(w).bits.old_meta := Mux(s2_tag_match(w), L1Metadata(s2_repl_meta(w).tag, s2_hit_state(w)), s2_repl_meta(w))
mshrs.io.req(w).bits.way_en := Mux(s2_tag_match(w), s2_tag_match_way(w), s2_replaced_way_en)
mshrs.io.req(w).bits.data := s2_req(w).data
mshrs.io.req(w).bits.is_hella := s2_req(w).is_hella
mshrs.io.req_is_probe(w) := s2_type === t_probe && s2_valid(w)
}
mshrs.io.meta_resp.valid := !s2_nack_hit(0) || prober.io.mshr_wb_rdy
mshrs.io.meta_resp.bits := Mux1H(s2_tag_match_way(0), RegNext(meta(0).io.resp))
when (mshrs.io.req.map(_.fire).reduce(_||_)) { replacer.miss }
tl_out.a <> mshrs.io.mem_acquire
// probes and releases
prober.io.req.valid := tl_out.b.valid && !lrsc_valid
tl_out.b.ready := prober.io.req.ready && !lrsc_valid
prober.io.req.bits := tl_out.b.bits
prober.io.way_en := s2_tag_match_way(0)
prober.io.block_state := s2_hit_state(0)
metaWriteArb.io.in(1) <> prober.io.meta_write
prober.io.mshr_rdy := mshrs.io.probe_rdy
prober.io.wb_rdy := (prober.io.meta_write.bits.idx =/= wb.io.idx.bits) || !wb.io.idx.valid
mshrs.io.prober_state := prober.io.state
// refills
when (tl_out.d.bits.source === cfg.nMSHRs.U) {
// This should be ReleaseAck
tl_out.d.ready := true.B
mshrs.io.mem_grant.valid := false.B
mshrs.io.mem_grant.bits := DontCare
} .otherwise {
// This should be GrantData
mshrs.io.mem_grant <> tl_out.d
}
dataWriteArb.io.in(1) <> mshrs.io.refill
metaWriteArb.io.in(0) <> mshrs.io.meta_write
tl_out.e <> mshrs.io.mem_finish
// writebacks
val wbArb = Module(new Arbiter(new WritebackReq(edge.bundle), 2))
// 0 goes to prober, 1 goes to MSHR evictions
wbArb.io.in(0) <> prober.io.wb_req
wbArb.io.in(1) <> mshrs.io.wb_req
wb.io.req <> wbArb.io.out
wb.io.data_resp := s2_data_muxed(0)
mshrs.io.wb_resp := wb.io.resp
wb.io.mem_grant := tl_out.d.fire && tl_out.d.bits.source === cfg.nMSHRs.U
val lsu_release_arb = Module(new Arbiter(new TLBundleC(edge.bundle), 2))
io.lsu.release <> lsu_release_arb.io.out
lsu_release_arb.io.in(0) <> wb.io.lsu_release
lsu_release_arb.io.in(1) <> prober.io.lsu_release
TLArbiter.lowest(edge, tl_out.c, wb.io.release, prober.io.rep)
io.lsu.perf.release := edge.done(tl_out.c)
io.lsu.perf.acquire := edge.done(tl_out.a)
// load data gen
val s2_data_word_prebypass = widthMap(w => s2_data_muxed(w) >> Cat(s2_word_idx(w), 0.U(log2Ceil(coreDataBits).W)))
val s2_data_word = Wire(Vec(lsuWidth, UInt()))
val loadgen = (0 until lsuWidth).map { w =>
new LoadGen(s2_req(w).uop.mem_size, s2_req(w).uop.mem_signed, s2_req(w).addr,
s2_data_word(w), s2_sc && (w == 0).B, wordBytes)
}
// Mux between cache responses and uncache responses
for (w <- 0 until lsuWidth) {
io.lsu.resp(w).valid := s2_valid(w) && s2_send_resp(w)
io.lsu.resp(w).bits.uop := s2_req(w).uop
io.lsu.resp(w).bits.data := loadgen(w).data | s2_sc_fail
io.lsu.resp(w).bits.is_hella := s2_req(w).is_hella
io.lsu.nack(w).valid := s2_valid(w) && s2_send_nack(w)
io.lsu.nack(w).bits := s2_req(w)
assert(!(io.lsu.nack(w).valid && s2_type =/= t_lsu))
io.lsu.store_ack(w).valid := s2_valid(w) && s2_send_store_ack(w) && (w == 0).B
io.lsu.store_ack(w).bits := s2_req(w)
}
io.lsu.ll_resp <> mshrs.io.resp
// Store/amo hits
val s3_req = Wire(new BoomDCacheReq)
s3_req := RegNext(s2_req(0))
val s3_valid = RegNext(s2_valid(0) && s2_hit(0) && isWrite(s2_req(0).uop.mem_cmd) &&
!s2_sc_fail && !(s2_send_nack(0) && s2_nack(0)))
val s3_data_word = RegNext(s2_data_word(0))
for (w <- 1 until lsuWidth) {
assert(!(s2_valid(w) && s2_hit(w) && isWrite(s2_req(w).uop.mem_cmd) &&
!s2_sc_fail && !(s2_send_nack(w) && s2_nack(w))),
"Store must go through 0th pipe in L1D")
}
// For bypassing
val s4_req = RegNext(s3_req)
val s4_valid = RegNext(s3_valid)
val s5_req = RegNext(s4_req)
val s5_valid = RegNext(s4_valid)
val s3_bypass = widthMap(w => s3_valid && ((s2_req(w).addr >> wordOffBits) === (s3_req.addr >> wordOffBits)))
val s4_bypass = widthMap(w => s4_valid && ((s2_req(w).addr >> wordOffBits) === (s4_req.addr >> wordOffBits)))
val s5_bypass = widthMap(w => s5_valid && ((s2_req(w).addr >> wordOffBits) === (s5_req.addr >> wordOffBits)))
// Store -> Load bypassing
for (w <- 0 until lsuWidth) {
s2_data_word(w) := Mux(s3_bypass(w), s3_req.data,
Mux(s4_bypass(w), s4_req.data,
Mux(s5_bypass(w), s5_req.data,
s2_data_word_prebypass(w))))
}
val amoalu = Module(new AMOALU(xLen))
amoalu.io.mask := new StoreGen(s3_req.uop.mem_size, s3_req.addr, 0.U, xLen/8).mask
amoalu.io.cmd := s3_req.uop.mem_cmd
amoalu.io.lhs := s3_data_word
amoalu.io.rhs := RegNext(s2_req(0).data)
s3_req.data := amoalu.io.out
val s3_way = RegNext(s2_tag_match_way(0))
dataWriteArb.io.in(0).valid := s3_valid
dataWriteArb.io.in(0).bits.addr := s3_req.addr
dataWriteArb.io.in(0).bits.wmask := UIntToOH(s3_req.addr.extract(rowOffBits-1,offsetlsb))
dataWriteArb.io.in(0).bits.data := Fill(rowWords, s3_req.data)
dataWriteArb.io.in(0).bits.way_en := s3_way
io.lsu.ordered := mshrs.io.fence_rdy && !s1_valid.reduce(_||_) && !s2_valid.reduce(_||_)
}
File Replacement.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import freechips.rocketchip.util.property.cover
abstract class ReplacementPolicy {
def nBits: Int
def perSet: Boolean
def way: UInt
def miss: Unit
def hit: Unit
def access(touch_way: UInt): Unit
def access(touch_ways: Seq[Valid[UInt]]): Unit
def state_read: UInt
def get_next_state(state: UInt, touch_way: UInt): UInt
def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = {
touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev))
}
def get_replace_way(state: UInt): UInt
}
object ReplacementPolicy {
def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match {
case "random" => new RandomReplacement(n_ways)
case "lru" => new TrueLRU(n_ways)
case "plru" => new PseudoLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
}
class RandomReplacement(n_ways: Int) extends ReplacementPolicy {
private val replace = Wire(Bool())
replace := false.B
def nBits = 16
def perSet = false
private val lfsr = LFSR(nBits, replace)
def state_read = WireDefault(lfsr)
def way = Random(n_ways, lfsr)
def miss = replace := true.B
def hit = {}
def access(touch_way: UInt) = {}
def access(touch_ways: Seq[Valid[UInt]]) = {}
def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare
def get_replace_way(state: UInt) = way
}
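// A minimal usage sketch (hypothetical module, not part of the original file): expose the
// current random victim for a 4-way set and pulse `miss` to advance the LFSR.
class RandomReplacementExample extends Module {
  val io = IO(new Bundle {
    val miss   = Input(Bool())
    val victim = Output(UInt(2.W))
  })
  val repl = ReplacementPolicy.fromString("random", 4)
  io.victim := repl.way         // current pseudo-random way (constant until a miss)
  when (io.miss) { repl.miss }  // asserts the LFSR's increment for this cycle
}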
abstract class SeqReplacementPolicy {
def access(set: UInt): Unit
def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit
def way: UInt
}
abstract class SetAssocReplacementPolicy {
def access(set: UInt, touch_way: UInt): Unit
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit
def way(set: UInt): UInt
}
class SeqRandom(n_ways: Int) extends SeqReplacementPolicy {
val logic = new RandomReplacement(n_ways)
def access(set: UInt) = { }
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
when (valid && !hit) { logic.miss }
}
def way = logic.way
}
class TrueLRU(n_ways: Int) extends ReplacementPolicy {
// True LRU replacement policy, using a triangular matrix to track which ways are more recently used than others.
// The matrix is packed into a single UInt (or Bits). Example 4-way (6-bits):
// [5] - 3 more recent than 2
// [4] - 3 more recent than 1
// [3] - 2 more recent than 1
// [2] - 3 more recent than 0
// [1] - 2 more recent than 0
// [0] - 1 more recent than 0
def nBits = (n_ways * (n_ways-1)) / 2
def perSet = true
private val state_reg = RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
private def extractMRUVec(state: UInt): Seq[UInt] = {
// Extract per-way information about which higher-indexed ways are more recently used
val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W)))
var lsb = 0
for (i <- 0 until n_ways-1) {
moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W))
lsb = lsb + (n_ways - i - 1)
}
moreRecentVec
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W)))
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
val wayDec = UIntToOH(touch_way, n_ways)
// Compute next value of triangular matrix
// set the touched way as more recent than every other way
nextState.zipWithIndex.map { case (e, i) =>
e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec)
}
nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1
}
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous")
}
}
def get_replace_way(state: UInt): UInt = {
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
// For each way, determine if all other ways are more recent
val mruWayDec = (0 until n_ways).map { i =>
val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR)
val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _))
upperMoreRecent && lowerMoreRecent
}
OHToUInt(mruWayDec)
}
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
@deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05")
def replace: UInt = way
}
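// A minimal sketch (hypothetical module, not part of the original file) exercising the pure
// state functions: touching ways 0, 1, 2, 3 in order from the all-zero state sets every
// "more recently used" bit of the triangular matrix, so way 0 becomes the replacement victim.
class TrueLRUExample extends Module {
  val io = IO(new Bundle { val victim = Output(UInt(2.W)) })
  val lru = new TrueLRU(4)
  val finalState = Seq(0, 1, 2, 3).foldLeft(0.U(lru.nBits.W)) { (s, w) =>
    lru.get_next_state(s, w.U(2.W))
  }
  io.victim := lru.get_replace_way(finalState) // expected: 0.U
}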
class PseudoLRU(n_ways: Int) extends ReplacementPolicy {
// Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU
//
//
// - bits storage example for 4-way PLRU binary tree:
// bit[2]: ways 3+2 older than ways 1+0
// / \
// bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0
//
//
// - bits storage example for 3-way PLRU binary tree:
// bit[1]: way 2 older than ways 1+0
// \
// bit[0]: way 1 older than way 0
//
//
// - bits storage example for 8-way PLRU binary tree:
// bit[6]: ways 7-4 older than ways 3-0
// / \
// bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0
// / \ / \
// bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0
def nBits = n_ways - 1
def perSet = true
private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i simultaneous")
}
}
/** @param state state_reg bits for this sub-tree
* @param touch_way touched way encoded value bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways")
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val set_left_older = !touch_way(log2Ceil(tree_nways)-1)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(set_left_older,
Mux(set_left_older,
left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree
get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(set_left_older,
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value
!touch_way(0)
} else { // tree_nways <= 1
// we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires)
0.U(1.W)
}
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo(log2Ceil(n_ways))
                      else touch_way.extract(log2Ceil(n_ways)-1,0)
get_next_state(state, touch_way_sized, n_ways)
}
/** @param state state_reg bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_replace_way(state: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
// this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val left_subtree_older = state(tree_nways-2)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right
get_replace_way(left_subtree_state, left_nways), // recurse left
get_replace_way(right_subtree_state, right_nways))) // recurse right
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right
0.U(1.W),
get_replace_way(right_subtree_state, right_nways))) // recurse right
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value
state(0)
} else { // tree_nways <= 1
// we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value
0.U(1.W)
}
}
def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways)
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
}
class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy {
val logic = new PseudoLRU(n_ways)
val state = SyncReadMem(n_sets, UInt(logic.nBits.W))
val current_state = Wire(UInt(logic.nBits.W))
val next_state = Wire(UInt(logic.nBits.W))
val plru_way = logic.get_replace_way(current_state)
def access(set: UInt) = {
current_state := state.read(set)
}
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
val update_way = Mux(hit, way, plru_way)
next_state := logic.get_next_state(current_state, update_way)
when (valid) { state.write(set, next_state) }
}
def way = plru_way
}
class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy {
val logic = policy.toLowerCase match {
case "plru" => new PseudoLRU(n_ways)
case "lru" => new TrueLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
val state_vec =
if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line
else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W))))
def access(set: UInt, touch_way: UInt) = {
state_vec(set) := logic.get_next_state(state_vec(set), touch_way)
}
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = {
require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways")
for (set <- 0 until n_sets) {
val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) =>
Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)}
when (set_touch_ways.map(_.valid).orR) {
state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways)
}
}
}
def way(set: UInt) = logic.get_replace_way(state_vec(set))
}
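// A minimal usage sketch (hypothetical module, not part of the original file): one touch
// port updating the per-set PLRU state and one combinational victim lookup.
class SetAssocLRUExample(nSets: Int = 64, nWays: Int = 4) extends Module {
  val io = IO(new Bundle {
    val touch    = Flipped(Valid(UInt(log2Ceil(nWays).W)))
    val touchSet = Input(UInt(log2Ceil(nSets).W))
    val querySet = Input(UInt(log2Ceil(nSets).W))
    val victim   = Output(UInt(log2Ceil(nWays).W))
  })
  val lru = new SetAssocLRU(nSets, nWays, "plru")
  when (io.touch.valid) { lru.access(io.touchSet, io.touch.bits) }
  io.victim := lru.way(io.querySet)
}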
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) {
val plru = new PseudoLRU(n_ways)
// step
io.finished := RegNext(true.B, false.B)
val get_replace_ways = (0 until (1 << (n_ways-1))).map(state =>
plru.get_replace_way(state = state.U((n_ways-1).W)))
val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way =>
plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W))))
n_ways match {
case 2 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1))
assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1))
}
case 3 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3))
assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2))
assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2))
assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2))
assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2))
}
case 4 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3))
assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4))
assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5))
assert(get_replace_ways(6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6))
assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7))
assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2))
assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3))
assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2))
assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3))
assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2))
assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3))
assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2))
assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3))
assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0))
assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1))
assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2))
assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3))
assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0))
assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1))
assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2))
assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3))
assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0))
assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 way=1: expected=6 actual=%d", get_next_states(6)(1))
assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2))
assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3))
assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0))
assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=1: expected=6 actual=%d", get_next_states(7)(1))
assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2))
assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3))
}
case 5 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15))
assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0))
assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1))
assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2))
assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3))
assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: expected=00 actual=%d", get_next_states( 0)(4))
assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0))
assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1))
assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2))
assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3))
assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4))
assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0))
assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1))
assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2))
assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3))
assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4))
assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0))
assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1))
assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2))
assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3))
assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4))
assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0))
assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1))
assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2))
assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3))
assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4))
assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0))
assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1))
assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2))
assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3))
assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4))
assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0))
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1))
assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2))
assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3))
assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4))
assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0))
        assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=1: expected=14 actual=%d", get_next_states( 7)(1))
assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2))
assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3))
assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4))
assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0))
assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1))
assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2))
assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3))
assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4))
assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0))
assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1))
assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2))
assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3))
assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4))
assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0))
assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1))
assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2))
assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3))
assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4))
assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0))
assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1))
assert(get_next_states(11)(2) === 11.U(plru.nBits.W), s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2))
assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3))
assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4))
assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0))
assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1))
assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2))
assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3))
assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4))
assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0))
assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1))
assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2))
assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3))
assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4))
assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0))
assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1))
assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2))
assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3))
assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4))
assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0))
        assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=1: expected=14 actual=%d", get_next_states(15)(1))
assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2))
assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3))
assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4))
}
case 6 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15))
assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16))
assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17))
assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18))
assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19))
assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20))
assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21))
assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22))
assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23))
assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24))
assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25))
assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26))
assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27))
assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28))
assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29))
assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30))
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31))
}
case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways")
}
}
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
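/**
 * Hypothetical usage sketch: XOR-folding a 64-bit global-history register down to
 * 12 index bits, roughly the way a predictor might hash its history. The object name
 * is illustrative only and not part of the BOOM API.
 */
object FoldedHistoryExample
{
  def apply(ghist: UInt): UInt = Fold(ghist, 12, 64)
}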
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
 * Object to return a copy of a MicroOp with its branch mask updated from the
 * branch-resolution info.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
 * Object to return an updated branch mask, given the branch-resolution info and
 * either a MicroOp or a raw branch mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
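/**
 * Hypothetical usage sketch: advancing an enqueue pointer of a 6-entry (non power-of-two)
 * queue by a variable amount. Assumes n_enq <= 6, so a single conditional subtract is
 * enough to wrap. The object name is illustrative only.
 */
object WrapAddExample
{
  def apply(enq_ptr: UInt, n_enq: UInt): UInt = WrapAdd(enq_ptr, n_enq, 6)
}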
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
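// For example, with n = 3: WrapInc(2.U, 3) yields 0.U and WrapDec(0.U, 3) yields 2.U.
// For a power-of-two n, the wrap falls out of truncating the result to log2Ceil(n) bits.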
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Double-invert so that when pc is wider than b, pc's upper bits are preserved
    // (a plain mask of width log2(b) would zero-extend and clear all bits above it).
~(~pc | (b-1).U)
}
}
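// For example, AlignPCToBoundary(pc, 64) clears pc(5,0) (a 64-byte, cache-line-sized
// boundary) while keeping all of pc's upper bits, whereas a plain `pc & ~((64-1).U)`
// would zero-extend the narrow inverted mask and wipe out the upper bits entirely.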
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
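/**
 * Hypothetical usage sketch: sign-extending a 20-bit packed immediate to a 64-bit
 * datapath value. The object name is illustrative only.
 */
object SextImmExample
{
  def apply(imm_packed: UInt): UInt = Sext(imm_packed(19,0), 64)
}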
/**
 * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate.
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
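// For example, with eight requesters where only bits 1 and 5 are valid:
// head = 3 selects index 5 (the first valid entry at or after the head), while
// head = 6 wraps around and selects index 1.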
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
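// For example, in an 8-entry circular buffer with head = 3: IsOlder(5.U, 1.U, 3.U) is
// true (entry 5 was allocated before entry 1, which has already wrapped past zero),
// while IsOlder(1.U, 5.U, 3.U) is false.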
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
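// For example, for a 5-bit input with only bit 2 set, MaskLower yields b00111 and
// MaskUpper yields b11100; with several bits set, the highest (respectively lowest)
// '1' determines where the mask ends.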
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
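// For example, transposing a Vec of 2 rows of 3 elements yields a Vec of 3 rows of
// 2 elements, with out(i)(j) equal to in(j)(i).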
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
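// For example, for an input whose set bits are 0, 2, and 3, SelectFirstN(in, 2)
// returns the one-hot grants for bit 0 and bit 2 (the two lowest set bits).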
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
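/**
 * Hypothetical usage sketch: compacting four possibly-valid byte streams down to the
 * first two valid ones. The wrapper below is illustrative only.
 */
class CompactorExample extends Module
{
  val io = IO(new Bundle {
    val in = Vec(4, Flipped(DecoupledIO(UInt(8.W))))
    val out = Vec(2, DecoupledIO(UInt(8.W)))
  })
  val compactor = Module(new Compactor(4, 2, UInt(8.W)))
  compactor.io.in <> io.in
  io.out <> compactor.io.out
}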
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
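/**
 * Hypothetical usage sketch (assumes an implicit Parameters in scope, as any BoomModule
 * has): an 8-entry queue of branch-maskable payloads that is emptied by branch kills
 * and pipeline flushes. The wrapper below is illustrative only, not part of BOOM.
 */
class BranchKillableQueueExample[T <: boom.v4.common.HasBoomUOP](gen: T)
  (implicit p: org.chipsalliance.cde.config.Parameters)
  extends boom.v4.common.BoomModule()(p)
{
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(gen))
    val deq = Decoupled(gen)
    val brupdate = Input(new BrUpdateInfo)
    val flush = Input(Bool())
  })
  val q = Module(new BranchKillableQueue(gen, entries = 8))
  q.io.enq <> io.enq
  q.io.brupdate := io.brupdate
  q.io.flush := io.flush
  io.deq <> q.io.deq
}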
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
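// For example, inside a module one can trace per-cycle status flags as single characters:
//   printf("v=%c b=%c\n", BoolToChar(valid, 'V'), BoolToChar(busy, 'B'))
// where `valid` and `busy` stand in for whatever Bools are being observed.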
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
File Consts.scala:
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket.constants
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
trait ScalarOpConstants {
val SZ_BR = 3
def BR_X = BitPat("b???")
def BR_EQ = 0.U(3.W)
def BR_NE = 1.U(3.W)
def BR_J = 2.U(3.W)
def BR_N = 3.U(3.W)
def BR_LT = 4.U(3.W)
def BR_GE = 5.U(3.W)
def BR_LTU = 6.U(3.W)
def BR_GEU = 7.U(3.W)
def A1_X = BitPat("b??")
def A1_ZERO = 0.U(2.W)
def A1_RS1 = 1.U(2.W)
def A1_PC = 2.U(2.W)
def A1_RS1SHL = 3.U(2.W)
def IMM_X = BitPat("b???")
def IMM_S = 0.U(3.W)
def IMM_SB = 1.U(3.W)
def IMM_U = 2.U(3.W)
def IMM_UJ = 3.U(3.W)
def IMM_I = 4.U(3.W)
def IMM_Z = 5.U(3.W)
def A2_X = BitPat("b???")
def A2_ZERO = 0.U(3.W)
def A2_SIZE = 1.U(3.W)
def A2_RS2 = 2.U(3.W)
def A2_IMM = 3.U(3.W)
def A2_RS2OH = 4.U(3.W)
def A2_IMMOH = 5.U(3.W)
def X = BitPat("b?")
def N = BitPat("b0")
def Y = BitPat("b1")
val SZ_DW = 1
def DW_X = X
def DW_32 = false.B
def DW_64 = true.B
def DW_XPR = DW_64
}
trait MemoryOpConstants {
val NUM_XA_OPS = 9
val M_SZ = 5
def M_X = BitPat("b?????");
def M_XRD = "b00000".U; // int load
def M_XWR = "b00001".U; // int store
def M_PFR = "b00010".U; // prefetch with intent to read
def M_PFW = "b00011".U; // prefetch with intent to write
def M_XA_SWAP = "b00100".U
def M_FLUSH_ALL = "b00101".U // flush all lines
def M_XLR = "b00110".U
def M_XSC = "b00111".U
def M_XA_ADD = "b01000".U
def M_XA_XOR = "b01001".U
def M_XA_OR = "b01010".U
def M_XA_AND = "b01011".U
def M_XA_MIN = "b01100".U
def M_XA_MAX = "b01101".U
def M_XA_MINU = "b01110".U
def M_XA_MAXU = "b01111".U
def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions
def M_PWR = "b10001".U // partial (masked) store
def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions
def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions
def M_SFENCE = "b10100".U // SFENCE.VMA
def M_HFENCEV = "b10101".U // HFENCE.VVMA
def M_HFENCEG = "b10110".U // HFENCE.GVMA
def M_WOK = "b10111".U // check write permissions but don't perform a write
def M_HLVX = "b10000".U // HLVX instruction
def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND)
def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU)
def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd)
def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW
def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd)
def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd)
def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR
}
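// Hypothetical usage sketch: mixing MemoryOpConstants into a helper that classifies
// commands; the object name is illustrative only.
object MemCmdClassifyExample extends MemoryOpConstants {
  // True for plain stores, partial stores, SC, AMOs, write-intent prefetches, and LR.
  def needsWritePermission(cmd: UInt): Bool = isWriteIntent(cmd)
}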
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
   * its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File consts.scala:
//******************************************************************************
// Copyright (c) 2011 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Constants
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.common.constants
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.Str
import freechips.rocketchip.rocket.RVCExpander
/**
* Mixin for scalar operation constants
*/
trait ScalarOpConstants
{
val X = BitPat("b?")
val Y = BitPat("b1")
val N = BitPat("b0")
//************************************
// Extra Constants
// Which branch predictor predicted us
val BSRC_SZ = 3
val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred
val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred
val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred
val BSRC_4 = 3.U(BSRC_SZ.W) // 4-cycle branch pred
val BSRC_C = 4.U(BSRC_SZ.W) // core branch resolution
//************************************
// Control Signals
// CFI types
val CFI_SZ = 3
val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction
val CFI_BR = 1.U(CFI_SZ.W) // Branch
val CFI_JAL = 2.U(CFI_SZ.W) // JAL
val CFI_JALR = 3.U(CFI_SZ.W) // JALR
// PC Select Signal
val PC_PLUS4 = 0.U(2.W) // PC + 4
val PC_BRJMP = 1.U(2.W) // brjmp_target
val PC_JALR = 2.U(2.W) // jump_reg_target
// Branch Type
val B_N = 0.U(4.W) // Next
val B_NE = 1.U(4.W) // Branch on NotEqual
val B_EQ = 2.U(4.W) // Branch on Equal
val B_GE = 3.U(4.W) // Branch on Greater/Equal
val B_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned
val B_LT = 5.U(4.W) // Branch on Less Than
val B_LTU = 6.U(4.W) // Branch on Less Than Unsigned
val B_J = 7.U(4.W) // Jump
val B_JR = 8.U(4.W) // Jump Register
// RS1 Operand Select Signal
val OP1_RS1 = 0.U(2.W) // Register Source #1
val OP1_ZERO= 1.U(2.W)
val OP1_PC = 2.U(2.W)
val OP1_RS1SHL = 3.U(2.W)
val OP1_X = BitPat("b??")
// RS2 Operand Select Signal
val OP2_RS2 = 0.U(3.W) // Register Source #2
val OP2_IMM = 1.U(3.W) // immediate
val OP2_ZERO= 2.U(3.W) // constant 0
val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4)
val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1
val OP2_RS2OH = 5.U(3.W)
val OP2_IMMOH = 6.U(3.W)
val OP2_X = BitPat("b???")
// Register File Write Enable Signal
val REN_0 = false.B
val REN_1 = true.B
  // Is 32b Word or 64b Doubleword?
val SZ_DW = 1
val DW_X = true.B // Bool(xLen==64)
val DW_32 = false.B
val DW_64 = true.B
val DW_XPR = true.B // Bool(xLen==64)
// Memory Enable Signal
val MEN_0 = false.B
val MEN_1 = true.B
val MEN_X = false.B
// Immediate Extend Select
val IS_I = 0.U(3.W) // I-Type (LD,ALU)
val IS_S = 1.U(3.W) // S-Type (ST)
val IS_B = 2.U(3.W) // SB-Type (BR)
val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC)
val IS_J = 4.U(3.W) // UJ-Type (J/JAL)
val IS_SH = 5.U(3.W) // short-type (sign extend from pimm to get imm)
val IS_N = 6.U(3.W) // No immediate (zeros immediate)
val IS_F3 = 7.U(3.W) // funct3
// Decode Stage Control Signals
val RT_FIX = 0.U(2.W)
val RT_FLT = 1.U(2.W)
val RT_X = 2.U(2.W) // not-a-register (prs1 = lrs1 special case)
val RT_ZERO = 3.U(2.W)
// IQT type
val IQ_SZ = 4
val IQ_MEM = 0
val IQ_UNQ = 1
val IQ_ALU = 2
val IQ_FP = 3
// Functional unit select
// bit mask, since a given execution pipeline may support multiple functional units
val FC_SZ = 10
val FC_ALU = 0
val FC_AGEN = 1
val FC_DGEN = 2
val FC_MUL = 3
val FC_DIV = 4
val FC_CSR = 5
val FC_FPU = 6
val FC_FDV = 7
val FC_I2F = 8
val FC_F2I = 9
def NullMicroOp(implicit p: Parameters) = 0.U.asTypeOf(new boom.v4.common.MicroOp)
}
/**
* Mixin for RISCV constants
*/
trait RISCVConstants
{
// abstract out instruction decode magic numbers
val RD_MSB = 11
val RD_LSB = 7
val RS1_MSB = 19
val RS1_LSB = 15
val RS2_MSB = 24
val RS2_LSB = 20
val RS3_MSB = 31
val RS3_LSB = 27
val CSR_ADDR_MSB = 31
val CSR_ADDR_LSB = 20
val CSR_ADDR_SZ = 12
  // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW, etc.)
val SHAMT_5_BIT = 25
val LONGEST_IMM_SZ = 20
val X0 = 0.U
val RA = 1.U // return address register
// memory consistency model
// The C/C++ atomics MCM requires that two loads to the same address maintain program order.
// The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior).
val MCM_ORDER_DEPENDENT_LOADS = true
val jal_opc = (0x6f).U
val jalr_opc = (0x67).U
def GetUop(inst: UInt): UInt = inst(6,0)
def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB)
def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB)
def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = {
val rvc_exp = Module(new RVCExpander)
rvc_exp.io.in := inst
Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst)
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt
}
// Note: Accepts only EXPANDED rvc instructions
def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = {
val bdecode = Module(new boom.v4.exu.BranchDecode)
bdecode.io.inst := inst
bdecode.io.pc := 0.U
bdecode.io.out.cfi_type
}
}
/**
* Mixin for exception cause constants
*/
trait ExcCauseConstants
{
  // a memory-disambiguation misspeculation occurred
val MINI_EXCEPTION_MEM_ORDERING = 16.U
val MINI_EXCEPTION_CSR_REPLAY = 17.U
require (!freechips.rocketchip.rocket.Causes.all.contains(16))
require (!freechips.rocketchip.rocket.Causes.all.contains(17))
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
 *   [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
 *   parameters.
 * @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
 * @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
 *   specified for a sink according to protocol.
 * @tparam BI
 *   Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
 * @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
 * @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
 *   Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
 * @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
 *   specified for a source according to protocol.
 * @tparam BO
 *   Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
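  // A hypothetical sketch (not taken from any concrete node subclass in this file) of how a
  // 1:1 adapter-style node could satisfy this contract: a star on one side simply absorbs
  // however many edges the other side already has, and the tuple is returned as (iStar, oStar).
  //
  //   protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) = {
  //     require(iStar + oStar <= 1, s"$name cannot have stars on both sides")
  //     if (iStar == 1)      (oKnown - iKnown, 0) // N :*= foo takes whatever the outputs still need
  //     else if (oStar == 1) (0, iKnown - oKnown) // foo :=* N takes whatever the inputs still provide
  //     else                 { require(iKnown == oKnown); (0, 0) }
  //   }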
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
    * An `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
  /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
    * An `n`-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections whose actual number of edges still has to be determined. We also need to build up the ranges of
    * edges which correspond to each binding operator, so that we can apply the correct edge parameters and later
    * build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // Star resolution depends on the node subclass to implement the algorithm (via resolveStar above).
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
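  // For illustration only (the counts below are assumed, not taken from a specific design):
  // if the per-binding resolved edge counts were Seq(1, 2, 1), then
  //   Seq(1, 2, 1).scanLeft(0)(_ + _) == Seq(0, 1, 3, 4)
  // and oSum.init.zip(oSum.tail) yields the half-open ranges Seq((0,1), (1,3), (3,4)),
  // which is exactly the oPortMapping/iPortMapping shape consumed by oDirectPorts/iDirectPorts below.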
  /** Sequence of this node's outward ports (one per resolved outgoing edge).
*
* This should be called after all star bindings are resolved.
*
    * Each element is: `j` Port index of this binding in the [[InwardNode.iPortMapping]] of the node on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of this node's inward ports (one per resolved incoming edge).
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
      // query the range of port indices that this binding occupies on the other side of the connection.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: unconnected forwarded diplomatic signals are DontCare'd for backwards compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: unconnected forwarded diplomatic signals are DontCare'd for backwards compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
  /** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
  /** Create the [[Dangle]]s which describe the connections into this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
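  /* For illustration (which operator maps to which binding is an assumption about the callers,
   * not stated in this file): a connection such as `x :*= y` presumably reaches bind() with
   * BIND_STAR, so `x` records BIND_STAR on its inward list while `y` records the mirrored
   * BIND_QUERY on its outward list, which is why the `binding match` above swaps STAR and QUERY. */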
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
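  // Worked example (sizes assumed for illustration): for lgSize = 3 (an 8-byte access),
  // UIntToOH1(3, maxLgSize) == 0x7, so the access is aligned iff (address & 0x7) === 0,
  // i.e. the address is a multiple of 8.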
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
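  // Worked example (bus width assumed for illustration): with 8-byte beats and a data-carrying
  // message of lgSize = 5 (32 bytes), UIntToOH1(5, maxLgSize) == 31 and 31 >> log2Ceil(8) == 3,
  // so numBeats1 returns 3, i.e. a 4-beat burst.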
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
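  // For the 4-beat burst in the example above: the counter loads beats1 = 3 on the first fire and
  // then counts 3 -> 2 -> 1 -> 0, so `first` is high on beat 1, `last` (and `done` on fire) on
  // beat 4, and `count` steps 0, 1, 2, 3.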
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
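  // For example, a Get never needs T permission, a PutFullData always does, and an
  // AcquireBlock needs it exactly when it asks to grow to Tip (NtoT or BtoT, not NtoB).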
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
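  // A minimal usage sketch (the names `edge`, `tl`, `addr`, and `wantRead` are placeholders,
  // not defined in this file): build and drive a full-beat read on channel A.
  //
  //   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   tl.a.valid := wantRead && legal
  //   tl.a.bits  := get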
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
File Arbiter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
object TLArbiter
{
// (valids, select) => readys
type Policy = (Integer, UInt, Bool) => UInt
val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0)
val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width))
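  // Worked example (assuming leftOR/rightOR fill set bits toward the MSB/LSB respectively, as
  // their use here implies): with width = 4 and valids = 0b0110,
  //   lowestIndexFirst:  leftOR(0b0110) = 0b1110, (0b1110 << 1)(3,0) = 0b1100, ~0b1100 = 0b0011,
  //                      so index 1 (the lowest valid bit) is the only valid source granted ready;
  //   highestIndexFirst: rightOR(0b0110) = 0b0111, (0b0111 >> 1).pad(4) = 0b0011, ~0b0011 = 0b1100,
  //                      so index 2 (the highest valid bit) wins.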
val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else {
val valid = valids(width-1, 0)
assert (valid === valids)
val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0))
val filter = Cat(valid & ~mask, valid)
val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width)
val readys = ~((unready >> width) & unready(width-1, 0))
when (select && valid.orR) {
mask := leftOR(readys & valid, width)
}
readys(width-1, 0)
}
def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = {
apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = {
if (sources.isEmpty) {
sink.bits := DontCare
} else if (sources.size == 1) {
sink :<>= sources.head._2
} else {
val pairs = sources.toList
val beatsIn = pairs.map(_._1)
val sourcesIn = pairs.map(_._2)
// The number of beats which remain to be sent
val beatsLeft = RegInit(0.U)
val idle = beatsLeft === 0.U
val latch = idle && sink.ready // winner (if any) claims sink
// Who wants access to the sink?
val valids = sourcesIn.map(_.valid)
// Arbitrate amongst the requests
val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools)
// Which request wins arbitration?
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v })
// Confirm the policy works properly
require (readys.size == valids.size)
// Never two winners
val prefixOR = winner.scanLeft(false.B)(_||_).init
assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _})
// If there was any request, there is a winner
assert (!valids.reduce(_||_) || winner.reduce(_||_))
// Track remaining beats
val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) }
val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats
beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire)
// The one-hot source granted access in the previous cycle
val state = RegInit(VecInit(Seq.fill(sources.size)(false.B)))
val muxState = Mux(idle, winner, state)
state := muxState
val allowed = Mux(idle, readys, state)
(sourcesIn zip allowed) foreach { case (s, r) =>
s.ready := sink.ready && r
}
sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids))
sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits))
}
}
}
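// A minimal usage sketch (the names `edgeOut`, `mem`, `aFromMshrs`, and `aFromWb` are placeholders,
// not defined in this file): arbitrate two A-channel sources onto one sink, preferring the lower
// index and holding the grant for the full length of a multi-beat message.
//
//   TLArbiter.lowest(edgeOut, mem.a, aFromMshrs, aFromWb)
//
// which is shorthand for
//   TLArbiter(TLArbiter.lowestIndexFirst)(mem.a,
//     (edgeOut.numBeats1(aFromMshrs.bits), aFromMshrs),
//     (edgeOut.numBeats1(aFromWb.bits), aFromWb))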
// Synthesizable unit tests
import freechips.rocketchip.unittest._
abstract class DecoupledArbiterTest(
policy: TLArbiter.Policy,
txns: Int,
timeout: Int,
val numSources: Int,
beatsLeftFromIdx: Int => UInt)
(implicit p: Parameters) extends UnitTest(timeout)
{
val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W))))
dontTouch(sources.suggestName("sources"))
val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W)))
dontTouch(sink.suggestName("sink"))
val count = RegInit(0.U(log2Ceil(txns).W))
val lfsr = LFSR(16, true.B)
sources.zipWithIndex.map { case (z, i) => z.bits := i.U }
TLArbiter(policy)(sink, sources.zipWithIndex.map {
case (z, i) => (beatsLeftFromIdx(i), z)
}:_*)
count := count + 1.U
io.finished := count >= txns.U
}
/** This tests that when a specific pattern of source valids is driven,
* a new index from amongst that pattern is always selected,
* unless one of those sources takes multiple beats,
* in which case the same index should be selected until the arbiter goes idle.
*/
class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false)
(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U)
{
val lastWinner = RegInit((numSources+1).U)
val beatsLeft = RegInit(0.U(log2Ceil(numSources).W))
val first = lastWinner > numSources.U
val valid = lfsr(0)
val ready = lfsr(15)
sink.ready := ready
sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way
case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid)
}
when (sink.fire) {
if (print) { printf("TestRobin: %d\n", sink.bits) }
when (beatsLeft === 0.U) {
assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.")
lastWinner := sink.bits
beatsLeft := sink.bits
} .otherwise {
assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats")
beatsLeft := beatsLeft - 1.U
}
}
if (print) {
when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) }
}
}
/** This tests that the lowest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertLowest(id: Int): Unit = {
when (sources(id).valid) {
assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) }
}
/** This tests that the highest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertHighest(id: Int): Unit = {
when (sources(id).valid) {
assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) }
}
File AMOALU.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
class StoreGen(typ: UInt, addr: UInt, dat: UInt, maxSize: Int) {
val size = Wire(UInt(log2Up(log2Up(maxSize)+1).W))
size := typ
val dat_padded = dat.pad(maxSize*8)
def misaligned: Bool =
(addr & ((1.U << size) - 1.U)(log2Up(maxSize)-1,0)).orR
def mask = {
var res = 1.U
for (i <- 0 until log2Up(maxSize)) {
val upper = Mux(addr(i), res, 0.U) | Mux(size >= (i+1).U, ((BigInt(1) << (1 << i))-1).U, 0.U)
val lower = Mux(addr(i), 0.U, res)
res = Cat(upper, lower)
}
res
}
protected def genData(i: Int): UInt =
if (i >= log2Up(maxSize)) dat_padded
else Mux(size === i.U, Fill(1 << (log2Up(maxSize)-i), dat_padded((8 << i)-1,0)), genData(i+1))
def data = genData(0)
def wordData = genData(2)
}
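// Worked example (values assumed for illustration): with maxSize = 8, a 4-byte store (size = 2)
// to addr = 4 builds the byte mask as
//   i=0: res = 0b11, i=1: res = 0b1111, i=2: res = Cat(0b1111, 0b0000) = 0xF0
// i.e. byte lanes 4..7 of the 8-byte word are written.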
class LoadGen(typ: UInt, signed: Bool, addr: UInt, dat: UInt, zero: Bool, maxSize: Int) {
private val size = new StoreGen(typ, addr, dat, maxSize).size
private def genData(logMinSize: Int): UInt = {
var res = dat
for (i <- log2Up(maxSize)-1 to logMinSize by -1) {
val pos = 8 << i
val shifted = Mux(addr(i), res(2*pos-1,pos), res(pos-1,0))
val doZero = (i == 0).B && zero
val zeroed = Mux(doZero, 0.U, shifted)
res = Cat(Mux(size === i.U || doZero, Fill(8*maxSize-pos, signed && zeroed(pos-1)), res(8*maxSize-1,pos)), zeroed)
}
res
}
def wordData = genData(2)
def data = genData(0)
}
class AMOALU(operandBits: Int)(implicit p: Parameters) extends Module {
val minXLen = 32
val widths = (0 to log2Ceil(operandBits / minXLen)).map(minXLen << _)
val io = IO(new Bundle {
val mask = Input(UInt((operandBits / 8).W))
val cmd = Input(UInt(M_SZ.W))
val lhs = Input(UInt(operandBits.W))
val rhs = Input(UInt(operandBits.W))
val out = Output(UInt(operandBits.W))
val out_unmasked = Output(UInt(operandBits.W))
})
val max = io.cmd === M_XA_MAX || io.cmd === M_XA_MAXU
val min = io.cmd === M_XA_MIN || io.cmd === M_XA_MINU
val add = io.cmd === M_XA_ADD
val logic_and = io.cmd === M_XA_OR || io.cmd === M_XA_AND
val logic_xor = io.cmd === M_XA_XOR || io.cmd === M_XA_OR
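  // Note: M_XA_OR sets both logic_and and logic_xor, so `logic` below evaluates to
  // (lhs & rhs) | (lhs ^ rhs) == lhs | rhs, letting OR share the AND and XOR networks.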
val adder_out = {
// partition the carry chain to support sub-xLen addition
val mask = ~(0.U(operandBits.W) +: widths.init.map(w => !io.mask(w/8-1) << (w-1))).reduce(_|_)
(io.lhs & mask) + (io.rhs & mask)
}
val less = {
// break up the comparator so the lower parts will be CSE'd
def isLessUnsigned(x: UInt, y: UInt, n: Int): Bool = {
if (n == minXLen) x(n-1, 0) < y(n-1, 0)
else x(n-1, n/2) < y(n-1, n/2) || x(n-1, n/2) === y(n-1, n/2) && isLessUnsigned(x, y, n/2)
}
def isLess(x: UInt, y: UInt, n: Int): Bool = {
val signed = {
val mask = M_XA_MIN ^ M_XA_MINU
(io.cmd & mask) === (M_XA_MIN & mask)
}
Mux(x(n-1) === y(n-1), isLessUnsigned(x, y, n), Mux(signed, x(n-1), y(n-1)))
}
PriorityMux(widths.reverse.map(w => (io.mask(w/8/2), isLess(io.lhs, io.rhs, w))))
}
val minmax = Mux(Mux(less, min, max), io.lhs, io.rhs)
val logic =
Mux(logic_and, io.lhs & io.rhs, 0.U) |
Mux(logic_xor, io.lhs ^ io.rhs, 0.U)
val out =
Mux(add, adder_out,
Mux(logic_and || logic_xor, logic,
minmax))
val wmask = FillInterleaved(8, io.mask)
io.out := wmask & out | ~wmask & io.lhs
io.out_unmasked := out
}
module BoomNonBlockingDCache_1( // @[dcache.scala:438:7]
input clock, // @[dcache.scala:438:7]
input reset, // @[dcache.scala:438:7]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_b_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_b_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_b_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_e_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output io_lsu_req_ready, // @[dcache.scala:444:14]
input io_lsu_req_valid, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_valid, // @[dcache.scala:444:14]
input [31:0] io_lsu_req_bits_0_bits_uop_inst, // @[dcache.scala:444:14]
input [31:0] io_lsu_req_bits_0_bits_uop_debug_inst, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_rvc, // @[dcache.scala:444:14]
input [33:0] io_lsu_req_bits_0_bits_uop_debug_pc, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iq_type_0, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iq_type_1, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iq_type_2, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iq_type_3, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_0, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_1, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_2, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_3, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_4, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_5, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_6, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_7, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_8, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fu_code_9, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iw_issued, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iw_issued_partial_agen, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iw_issued_partial_dgen, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iw_p1_speculative_child, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iw_p2_speculative_child, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iw_p1_bypass_hint, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iw_p2_bypass_hint, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_iw_p3_bypass_hint, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_dis_col_sel, // @[dcache.scala:444:14]
input [3:0] io_lsu_req_bits_0_bits_uop_br_mask, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_br_tag, // @[dcache.scala:444:14]
input [3:0] io_lsu_req_bits_0_bits_uop_br_type, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_sfb, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_fence, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_fencei, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_sfence, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_amo, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_eret, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_sys_pc2epc, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_rocc, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_mov, // @[dcache.scala:444:14]
input [3:0] io_lsu_req_bits_0_bits_uop_ftq_idx, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_edge_inst, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_pc_lob, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_taken, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_imm_rename, // @[dcache.scala:444:14]
input [2:0] io_lsu_req_bits_0_bits_uop_imm_sel, // @[dcache.scala:444:14]
input [4:0] io_lsu_req_bits_0_bits_uop_pimm, // @[dcache.scala:444:14]
input [19:0] io_lsu_req_bits_0_bits_uop_imm_packed, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_op1_sel, // @[dcache.scala:444:14]
input [2:0] io_lsu_req_bits_0_bits_uop_op2_sel, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_ldst, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_wen, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_ren1, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_ren2, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_ren3, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_swap12, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_swap23, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_fp_ctrl_typeTagIn, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_fp_ctrl_typeTagOut, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_fromint, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_toint, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_fastpipe, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_fma, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_div, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_sqrt, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_wflags, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_ctrl_vec, // @[dcache.scala:444:14]
input [4:0] io_lsu_req_bits_0_bits_uop_rob_idx, // @[dcache.scala:444:14]
input [3:0] io_lsu_req_bits_0_bits_uop_ldq_idx, // @[dcache.scala:444:14]
input [3:0] io_lsu_req_bits_0_bits_uop_stq_idx, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_rxq_idx, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_pdst, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_prs1, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_prs2, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_prs3, // @[dcache.scala:444:14]
input [3:0] io_lsu_req_bits_0_bits_uop_ppred, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_prs1_busy, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_prs2_busy, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_prs3_busy, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_ppred_busy, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_stale_pdst, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_exception, // @[dcache.scala:444:14]
input [63:0] io_lsu_req_bits_0_bits_uop_exc_cause, // @[dcache.scala:444:14]
input [4:0] io_lsu_req_bits_0_bits_uop_mem_cmd, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_mem_size, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_mem_signed, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_uses_ldq, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_uses_stq, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_is_unique, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_flush_on_commit, // @[dcache.scala:444:14]
input [2:0] io_lsu_req_bits_0_bits_uop_csr_cmd, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_ldst_is_rs1, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_ldst, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_lrs1, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_lrs2, // @[dcache.scala:444:14]
input [5:0] io_lsu_req_bits_0_bits_uop_lrs3, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_dst_rtype, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_lrs1_rtype, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_lrs2_rtype, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_frs3_en, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fcn_dw, // @[dcache.scala:444:14]
input [4:0] io_lsu_req_bits_0_bits_uop_fcn_op, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_fp_val, // @[dcache.scala:444:14]
input [2:0] io_lsu_req_bits_0_bits_uop_fp_rm, // @[dcache.scala:444:14]
input [1:0] io_lsu_req_bits_0_bits_uop_fp_typ, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_xcpt_pf_if, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_xcpt_ae_if, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_xcpt_ma_if, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_bp_debug_if, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_uop_bp_xcpt_if, // @[dcache.scala:444:14]
input [2:0] io_lsu_req_bits_0_bits_uop_debug_fsrc, // @[dcache.scala:444:14]
input [2:0] io_lsu_req_bits_0_bits_uop_debug_tsrc, // @[dcache.scala:444:14]
input [33:0] io_lsu_req_bits_0_bits_addr, // @[dcache.scala:444:14]
input [63:0] io_lsu_req_bits_0_bits_data, // @[dcache.scala:444:14]
input io_lsu_req_bits_0_bits_is_hella, // @[dcache.scala:444:14]
input io_lsu_s1_kill_0, // @[dcache.scala:444:14]
output io_lsu_resp_0_valid, // @[dcache.scala:444:14]
output [31:0] io_lsu_resp_0_bits_uop_inst, // @[dcache.scala:444:14]
output [31:0] io_lsu_resp_0_bits_uop_debug_inst, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_rvc, // @[dcache.scala:444:14]
output [33:0] io_lsu_resp_0_bits_uop_debug_pc, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iq_type_0, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iq_type_1, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iq_type_2, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iq_type_3, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_0, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_1, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_2, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_3, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_4, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_5, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_6, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_7, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_8, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fu_code_9, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iw_issued, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iw_issued_partial_agen, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iw_issued_partial_dgen, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iw_p1_speculative_child, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iw_p2_speculative_child, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iw_p1_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iw_p2_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_iw_p3_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_dis_col_sel, // @[dcache.scala:444:14]
output [3:0] io_lsu_resp_0_bits_uop_br_mask, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_br_tag, // @[dcache.scala:444:14]
output [3:0] io_lsu_resp_0_bits_uop_br_type, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_sfb, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_fence, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_fencei, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_sfence, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_amo, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_eret, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_sys_pc2epc, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_rocc, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_mov, // @[dcache.scala:444:14]
output [3:0] io_lsu_resp_0_bits_uop_ftq_idx, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_edge_inst, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_pc_lob, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_taken, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_imm_rename, // @[dcache.scala:444:14]
output [2:0] io_lsu_resp_0_bits_uop_imm_sel, // @[dcache.scala:444:14]
output [4:0] io_lsu_resp_0_bits_uop_pimm, // @[dcache.scala:444:14]
output [19:0] io_lsu_resp_0_bits_uop_imm_packed, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_op1_sel, // @[dcache.scala:444:14]
output [2:0] io_lsu_resp_0_bits_uop_op2_sel, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_ldst, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_wen, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_ren1, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_ren2, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_ren3, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_swap12, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_swap23, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_fp_ctrl_typeTagIn, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_fp_ctrl_typeTagOut, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_fromint, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_toint, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_fastpipe, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_fma, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_div, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_sqrt, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_wflags, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_ctrl_vec, // @[dcache.scala:444:14]
output [4:0] io_lsu_resp_0_bits_uop_rob_idx, // @[dcache.scala:444:14]
output [3:0] io_lsu_resp_0_bits_uop_ldq_idx, // @[dcache.scala:444:14]
output [3:0] io_lsu_resp_0_bits_uop_stq_idx, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_rxq_idx, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_pdst, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_prs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_prs2, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_prs3, // @[dcache.scala:444:14]
output [3:0] io_lsu_resp_0_bits_uop_ppred, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_prs1_busy, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_prs2_busy, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_prs3_busy, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_ppred_busy, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_stale_pdst, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_exception, // @[dcache.scala:444:14]
output [63:0] io_lsu_resp_0_bits_uop_exc_cause, // @[dcache.scala:444:14]
output [4:0] io_lsu_resp_0_bits_uop_mem_cmd, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_mem_size, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_mem_signed, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_uses_ldq, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_uses_stq, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_is_unique, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_flush_on_commit, // @[dcache.scala:444:14]
output [2:0] io_lsu_resp_0_bits_uop_csr_cmd, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_ldst_is_rs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_ldst, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_lrs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_lrs2, // @[dcache.scala:444:14]
output [5:0] io_lsu_resp_0_bits_uop_lrs3, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_dst_rtype, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_lrs1_rtype, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_lrs2_rtype, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_frs3_en, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fcn_dw, // @[dcache.scala:444:14]
output [4:0] io_lsu_resp_0_bits_uop_fcn_op, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_fp_val, // @[dcache.scala:444:14]
output [2:0] io_lsu_resp_0_bits_uop_fp_rm, // @[dcache.scala:444:14]
output [1:0] io_lsu_resp_0_bits_uop_fp_typ, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_xcpt_pf_if, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_xcpt_ae_if, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_xcpt_ma_if, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_bp_debug_if, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_uop_bp_xcpt_if, // @[dcache.scala:444:14]
output [2:0] io_lsu_resp_0_bits_uop_debug_fsrc, // @[dcache.scala:444:14]
output [2:0] io_lsu_resp_0_bits_uop_debug_tsrc, // @[dcache.scala:444:14]
output [63:0] io_lsu_resp_0_bits_data, // @[dcache.scala:444:14]
output io_lsu_resp_0_bits_is_hella, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_valid, // @[dcache.scala:444:14]
output [31:0] io_lsu_store_ack_0_bits_uop_inst, // @[dcache.scala:444:14]
output [31:0] io_lsu_store_ack_0_bits_uop_debug_inst, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_rvc, // @[dcache.scala:444:14]
output [33:0] io_lsu_store_ack_0_bits_uop_debug_pc, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iq_type_0, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iq_type_1, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iq_type_2, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iq_type_3, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_0, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_1, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_2, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_3, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_4, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_5, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_6, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_7, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_8, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fu_code_9, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iw_issued, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iw_issued_partial_agen, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iw_issued_partial_dgen, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iw_p1_speculative_child, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iw_p2_speculative_child, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iw_p1_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iw_p2_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_iw_p3_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_dis_col_sel, // @[dcache.scala:444:14]
output [3:0] io_lsu_store_ack_0_bits_uop_br_mask, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_br_tag, // @[dcache.scala:444:14]
output [3:0] io_lsu_store_ack_0_bits_uop_br_type, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_sfb, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_fence, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_fencei, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_sfence, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_amo, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_eret, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_sys_pc2epc, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_rocc, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_mov, // @[dcache.scala:444:14]
output [3:0] io_lsu_store_ack_0_bits_uop_ftq_idx, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_edge_inst, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_pc_lob, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_taken, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_imm_rename, // @[dcache.scala:444:14]
output [2:0] io_lsu_store_ack_0_bits_uop_imm_sel, // @[dcache.scala:444:14]
output [4:0] io_lsu_store_ack_0_bits_uop_pimm, // @[dcache.scala:444:14]
output [19:0] io_lsu_store_ack_0_bits_uop_imm_packed, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_op1_sel, // @[dcache.scala:444:14]
output [2:0] io_lsu_store_ack_0_bits_uop_op2_sel, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_ldst, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_wen, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_ren1, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_ren2, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_ren3, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_swap12, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_swap23, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_fp_ctrl_typeTagIn, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_fp_ctrl_typeTagOut, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_fromint, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_toint, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_fastpipe, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_fma, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_div, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_sqrt, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_wflags, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_ctrl_vec, // @[dcache.scala:444:14]
output [4:0] io_lsu_store_ack_0_bits_uop_rob_idx, // @[dcache.scala:444:14]
output [3:0] io_lsu_store_ack_0_bits_uop_ldq_idx, // @[dcache.scala:444:14]
output [3:0] io_lsu_store_ack_0_bits_uop_stq_idx, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_rxq_idx, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_pdst, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_prs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_prs2, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_prs3, // @[dcache.scala:444:14]
output [3:0] io_lsu_store_ack_0_bits_uop_ppred, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_prs1_busy, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_prs2_busy, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_prs3_busy, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_ppred_busy, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_stale_pdst, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_exception, // @[dcache.scala:444:14]
output [63:0] io_lsu_store_ack_0_bits_uop_exc_cause, // @[dcache.scala:444:14]
output [4:0] io_lsu_store_ack_0_bits_uop_mem_cmd, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_mem_size, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_mem_signed, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_uses_ldq, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_uses_stq, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_is_unique, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_flush_on_commit, // @[dcache.scala:444:14]
output [2:0] io_lsu_store_ack_0_bits_uop_csr_cmd, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_ldst_is_rs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_ldst, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_lrs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_lrs2, // @[dcache.scala:444:14]
output [5:0] io_lsu_store_ack_0_bits_uop_lrs3, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_dst_rtype, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_lrs1_rtype, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_lrs2_rtype, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_frs3_en, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fcn_dw, // @[dcache.scala:444:14]
output [4:0] io_lsu_store_ack_0_bits_uop_fcn_op, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_fp_val, // @[dcache.scala:444:14]
output [2:0] io_lsu_store_ack_0_bits_uop_fp_rm, // @[dcache.scala:444:14]
output [1:0] io_lsu_store_ack_0_bits_uop_fp_typ, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_xcpt_pf_if, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_xcpt_ae_if, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_xcpt_ma_if, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_bp_debug_if, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_uop_bp_xcpt_if, // @[dcache.scala:444:14]
output [2:0] io_lsu_store_ack_0_bits_uop_debug_fsrc, // @[dcache.scala:444:14]
output [2:0] io_lsu_store_ack_0_bits_uop_debug_tsrc, // @[dcache.scala:444:14]
output [33:0] io_lsu_store_ack_0_bits_addr, // @[dcache.scala:444:14]
output [63:0] io_lsu_store_ack_0_bits_data, // @[dcache.scala:444:14]
output io_lsu_store_ack_0_bits_is_hella, // @[dcache.scala:444:14]
output io_lsu_nack_0_valid, // @[dcache.scala:444:14]
output [31:0] io_lsu_nack_0_bits_uop_inst, // @[dcache.scala:444:14]
output [31:0] io_lsu_nack_0_bits_uop_debug_inst, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_rvc, // @[dcache.scala:444:14]
output [33:0] io_lsu_nack_0_bits_uop_debug_pc, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iq_type_0, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iq_type_1, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iq_type_2, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iq_type_3, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_0, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_1, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_2, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_3, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_4, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_5, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_6, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_7, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_8, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fu_code_9, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iw_issued, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iw_issued_partial_agen, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iw_issued_partial_dgen, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iw_p1_speculative_child, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iw_p2_speculative_child, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iw_p1_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iw_p2_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_iw_p3_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_dis_col_sel, // @[dcache.scala:444:14]
output [3:0] io_lsu_nack_0_bits_uop_br_mask, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_br_tag, // @[dcache.scala:444:14]
output [3:0] io_lsu_nack_0_bits_uop_br_type, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_sfb, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_fence, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_fencei, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_sfence, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_amo, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_eret, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_sys_pc2epc, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_rocc, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_mov, // @[dcache.scala:444:14]
output [3:0] io_lsu_nack_0_bits_uop_ftq_idx, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_edge_inst, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_pc_lob, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_taken, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_imm_rename, // @[dcache.scala:444:14]
output [2:0] io_lsu_nack_0_bits_uop_imm_sel, // @[dcache.scala:444:14]
output [4:0] io_lsu_nack_0_bits_uop_pimm, // @[dcache.scala:444:14]
output [19:0] io_lsu_nack_0_bits_uop_imm_packed, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_op1_sel, // @[dcache.scala:444:14]
output [2:0] io_lsu_nack_0_bits_uop_op2_sel, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_ldst, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_wen, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_ren1, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_ren2, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_ren3, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_swap12, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_swap23, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_fp_ctrl_typeTagIn, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_fp_ctrl_typeTagOut, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_fromint, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_toint, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_fastpipe, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_fma, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_div, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_sqrt, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_wflags, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_ctrl_vec, // @[dcache.scala:444:14]
output [4:0] io_lsu_nack_0_bits_uop_rob_idx, // @[dcache.scala:444:14]
output [3:0] io_lsu_nack_0_bits_uop_ldq_idx, // @[dcache.scala:444:14]
output [3:0] io_lsu_nack_0_bits_uop_stq_idx, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_rxq_idx, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_pdst, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_prs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_prs2, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_prs3, // @[dcache.scala:444:14]
output [3:0] io_lsu_nack_0_bits_uop_ppred, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_prs1_busy, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_prs2_busy, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_prs3_busy, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_ppred_busy, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_stale_pdst, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_exception, // @[dcache.scala:444:14]
output [63:0] io_lsu_nack_0_bits_uop_exc_cause, // @[dcache.scala:444:14]
output [4:0] io_lsu_nack_0_bits_uop_mem_cmd, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_mem_size, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_mem_signed, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_uses_ldq, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_uses_stq, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_is_unique, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_flush_on_commit, // @[dcache.scala:444:14]
output [2:0] io_lsu_nack_0_bits_uop_csr_cmd, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_ldst_is_rs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_ldst, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_lrs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_lrs2, // @[dcache.scala:444:14]
output [5:0] io_lsu_nack_0_bits_uop_lrs3, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_dst_rtype, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_lrs1_rtype, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_lrs2_rtype, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_frs3_en, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fcn_dw, // @[dcache.scala:444:14]
output [4:0] io_lsu_nack_0_bits_uop_fcn_op, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_fp_val, // @[dcache.scala:444:14]
output [2:0] io_lsu_nack_0_bits_uop_fp_rm, // @[dcache.scala:444:14]
output [1:0] io_lsu_nack_0_bits_uop_fp_typ, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_xcpt_pf_if, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_xcpt_ae_if, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_xcpt_ma_if, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_bp_debug_if, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_uop_bp_xcpt_if, // @[dcache.scala:444:14]
output [2:0] io_lsu_nack_0_bits_uop_debug_fsrc, // @[dcache.scala:444:14]
output [2:0] io_lsu_nack_0_bits_uop_debug_tsrc, // @[dcache.scala:444:14]
output [33:0] io_lsu_nack_0_bits_addr, // @[dcache.scala:444:14]
output [63:0] io_lsu_nack_0_bits_data, // @[dcache.scala:444:14]
output io_lsu_nack_0_bits_is_hella, // @[dcache.scala:444:14]
input io_lsu_ll_resp_ready, // @[dcache.scala:444:14]
output io_lsu_ll_resp_valid, // @[dcache.scala:444:14]
output [31:0] io_lsu_ll_resp_bits_uop_inst, // @[dcache.scala:444:14]
output [31:0] io_lsu_ll_resp_bits_uop_debug_inst, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_rvc, // @[dcache.scala:444:14]
output [33:0] io_lsu_ll_resp_bits_uop_debug_pc, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iq_type_0, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iq_type_1, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iq_type_2, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iq_type_3, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_0, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_1, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_2, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_3, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_4, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_5, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_6, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_7, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_8, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fu_code_9, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iw_issued, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iw_issued_partial_agen, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iw_issued_partial_dgen, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iw_p1_speculative_child, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iw_p2_speculative_child, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iw_p1_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iw_p2_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_iw_p3_bypass_hint, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_dis_col_sel, // @[dcache.scala:444:14]
output [3:0] io_lsu_ll_resp_bits_uop_br_mask, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_br_tag, // @[dcache.scala:444:14]
output [3:0] io_lsu_ll_resp_bits_uop_br_type, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_sfb, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_fence, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_fencei, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_sfence, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_amo, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_eret, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_sys_pc2epc, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_rocc, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_mov, // @[dcache.scala:444:14]
output [3:0] io_lsu_ll_resp_bits_uop_ftq_idx, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_edge_inst, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_pc_lob, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_taken, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_imm_rename, // @[dcache.scala:444:14]
output [2:0] io_lsu_ll_resp_bits_uop_imm_sel, // @[dcache.scala:444:14]
output [4:0] io_lsu_ll_resp_bits_uop_pimm, // @[dcache.scala:444:14]
output [19:0] io_lsu_ll_resp_bits_uop_imm_packed, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_op1_sel, // @[dcache.scala:444:14]
output [2:0] io_lsu_ll_resp_bits_uop_op2_sel, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_ldst, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_wen, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_ren1, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_ren2, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_ren3, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_swap12, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_swap23, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_fp_ctrl_typeTagIn, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_fp_ctrl_typeTagOut, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_fromint, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_toint, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_fastpipe, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_fma, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_div, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_sqrt, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_wflags, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_ctrl_vec, // @[dcache.scala:444:14]
output [4:0] io_lsu_ll_resp_bits_uop_rob_idx, // @[dcache.scala:444:14]
output [3:0] io_lsu_ll_resp_bits_uop_ldq_idx, // @[dcache.scala:444:14]
output [3:0] io_lsu_ll_resp_bits_uop_stq_idx, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_rxq_idx, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_pdst, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_prs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_prs2, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_prs3, // @[dcache.scala:444:14]
output [3:0] io_lsu_ll_resp_bits_uop_ppred, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_prs1_busy, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_prs2_busy, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_prs3_busy, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_ppred_busy, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_stale_pdst, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_exception, // @[dcache.scala:444:14]
output [63:0] io_lsu_ll_resp_bits_uop_exc_cause, // @[dcache.scala:444:14]
output [4:0] io_lsu_ll_resp_bits_uop_mem_cmd, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_mem_size, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_mem_signed, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_uses_ldq, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_uses_stq, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_is_unique, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_flush_on_commit, // @[dcache.scala:444:14]
output [2:0] io_lsu_ll_resp_bits_uop_csr_cmd, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_ldst_is_rs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_ldst, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_lrs1, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_lrs2, // @[dcache.scala:444:14]
output [5:0] io_lsu_ll_resp_bits_uop_lrs3, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_dst_rtype, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_lrs1_rtype, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_lrs2_rtype, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_frs3_en, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fcn_dw, // @[dcache.scala:444:14]
output [4:0] io_lsu_ll_resp_bits_uop_fcn_op, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_fp_val, // @[dcache.scala:444:14]
output [2:0] io_lsu_ll_resp_bits_uop_fp_rm, // @[dcache.scala:444:14]
output [1:0] io_lsu_ll_resp_bits_uop_fp_typ, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_xcpt_pf_if, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_xcpt_ae_if, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_xcpt_ma_if, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_bp_debug_if, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_uop_bp_xcpt_if, // @[dcache.scala:444:14]
output [2:0] io_lsu_ll_resp_bits_uop_debug_fsrc, // @[dcache.scala:444:14]
output [2:0] io_lsu_ll_resp_bits_uop_debug_tsrc, // @[dcache.scala:444:14]
output [63:0] io_lsu_ll_resp_bits_data, // @[dcache.scala:444:14]
output io_lsu_ll_resp_bits_is_hella, // @[dcache.scala:444:14]
input [4:0] io_lsu_rob_pnr_idx, // @[dcache.scala:444:14]
input [4:0] io_lsu_rob_head_idx, // @[dcache.scala:444:14]
input io_lsu_release_ready, // @[dcache.scala:444:14]
output io_lsu_release_valid, // @[dcache.scala:444:14]
output [2:0] io_lsu_release_bits_opcode, // @[dcache.scala:444:14]
output [2:0] io_lsu_release_bits_param, // @[dcache.scala:444:14]
output [3:0] io_lsu_release_bits_size, // @[dcache.scala:444:14]
output [3:0] io_lsu_release_bits_source, // @[dcache.scala:444:14]
output [31:0] io_lsu_release_bits_address, // @[dcache.scala:444:14]
output [63:0] io_lsu_release_bits_data, // @[dcache.scala:444:14]
input io_lsu_force_order, // @[dcache.scala:444:14]
output io_lsu_ordered, // @[dcache.scala:444:14]
output io_lsu_perf_acquire, // @[dcache.scala:444:14]
output io_lsu_perf_release // @[dcache.scala:444:14]
);
wire [1:0] s3_req_uop_mem_size; // @[dcache.scala:895:22]
wire [1:0] _s2_repl_meta_WIRE_1_coh_state; // @[Mux.scala:30:73]
wire [21:0] _s2_repl_meta_WIRE_1_tag; // @[Mux.scala:30:73]
wire [1:0] _s2_hit_state_WIRE_1_state; // @[Mux.scala:30:73]
wire _mshrs_io_replay_ready_T; // @[dcache.scala:534:58]
wire _lsu_release_arb_io_in_0_ready; // @[dcache.scala:857:31]
wire _lsu_release_arb_io_in_1_ready; // @[dcache.scala:857:31]
wire _wbArb_io_in_0_ready; // @[dcache.scala:848:21]
wire _wbArb_io_in_1_ready; // @[dcache.scala:848:21]
wire _wbArb_io_out_valid; // @[dcache.scala:848:21]
wire [21:0] _wbArb_io_out_bits_tag; // @[dcache.scala:848:21]
wire [3:0] _wbArb_io_out_bits_idx; // @[dcache.scala:848:21]
wire [3:0] _wbArb_io_out_bits_source; // @[dcache.scala:848:21]
wire [2:0] _wbArb_io_out_bits_param; // @[dcache.scala:848:21]
wire [1:0] _wbArb_io_out_bits_way_en; // @[dcache.scala:848:21]
wire _wbArb_io_out_bits_voluntary; // @[dcache.scala:848:21]
wire _lfsr_prng_io_out_0; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_1; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_2; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_3; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_4; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_5; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_6; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_7; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_8; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_9; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_10; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_11; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_12; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_13; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_14; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_15; // @[PRNG.scala:91:22]
wire _dataReadArb_io_in_1_ready; // @[dcache.scala:490:27]
wire _dataReadArb_io_in_2_ready; // @[dcache.scala:490:27]
wire _dataReadArb_io_out_valid; // @[dcache.scala:490:27]
wire [1:0] _dataReadArb_io_out_bits_req_0_way_en; // @[dcache.scala:490:27]
wire [9:0] _dataReadArb_io_out_bits_req_0_addr; // @[dcache.scala:490:27]
wire _dataReadArb_io_out_bits_valid_0; // @[dcache.scala:490:27]
wire _dataWriteArb_io_in_1_ready; // @[dcache.scala:488:28]
wire [1:0] _dataWriteArb_io_out_bits_way_en; // @[dcache.scala:488:28]
wire [9:0] _dataWriteArb_io_out_bits_addr; // @[dcache.scala:488:28]
wire [63:0] _dataWriteArb_io_out_bits_data; // @[dcache.scala:488:28]
wire _metaReadArb_io_in_1_ready; // @[dcache.scala:472:27]
wire _metaReadArb_io_in_2_ready; // @[dcache.scala:472:27]
wire _metaReadArb_io_in_3_ready; // @[dcache.scala:472:27]
wire _metaReadArb_io_in_4_ready; // @[dcache.scala:472:27]
wire _metaReadArb_io_in_5_ready; // @[dcache.scala:472:27]
wire _metaReadArb_io_out_valid; // @[dcache.scala:472:27]
wire [3:0] _metaReadArb_io_out_bits_req_0_idx; // @[dcache.scala:472:27]
wire [1:0] _metaReadArb_io_out_bits_req_0_way_en; // @[dcache.scala:472:27]
wire [21:0] _metaReadArb_io_out_bits_req_0_tag; // @[dcache.scala:472:27]
wire _metaWriteArb_io_in_0_ready; // @[dcache.scala:470:28]
wire _metaWriteArb_io_in_1_ready; // @[dcache.scala:470:28]
wire _metaWriteArb_io_out_valid; // @[dcache.scala:470:28]
wire [3:0] _metaWriteArb_io_out_bits_idx; // @[dcache.scala:470:28]
wire [1:0] _metaWriteArb_io_out_bits_way_en; // @[dcache.scala:470:28]
wire [21:0] _metaWriteArb_io_out_bits_tag; // @[dcache.scala:470:28]
wire [1:0] _metaWriteArb_io_out_bits_data_coh_state; // @[dcache.scala:470:28]
wire [21:0] _metaWriteArb_io_out_bits_data_tag; // @[dcache.scala:470:28]
wire _meta_0_io_read_ready; // @[dcache.scala:469:41]
wire _meta_0_io_write_ready; // @[dcache.scala:469:41]
wire [1:0] _meta_0_io_resp_0_coh_state; // @[dcache.scala:469:41]
wire [21:0] _meta_0_io_resp_0_tag; // @[dcache.scala:469:41]
wire [1:0] _meta_0_io_resp_1_coh_state; // @[dcache.scala:469:41]
wire [21:0] _meta_0_io_resp_1_tag; // @[dcache.scala:469:41]
wire _mshrs_io_req_0_ready; // @[dcache.scala:460:21]
wire _mshrs_io_resp_valid; // @[dcache.scala:460:21]
wire _mshrs_io_secondary_miss_0; // @[dcache.scala:460:21]
wire _mshrs_io_block_hit_0; // @[dcache.scala:460:21]
wire _mshrs_io_mem_grant_ready; // @[dcache.scala:460:21]
wire _mshrs_io_refill_valid; // @[dcache.scala:460:21]
wire [1:0] _mshrs_io_refill_bits_way_en; // @[dcache.scala:460:21]
wire [9:0] _mshrs_io_refill_bits_addr; // @[dcache.scala:460:21]
wire [63:0] _mshrs_io_refill_bits_data; // @[dcache.scala:460:21]
wire _mshrs_io_meta_write_valid; // @[dcache.scala:460:21]
wire [3:0] _mshrs_io_meta_write_bits_idx; // @[dcache.scala:460:21]
wire [1:0] _mshrs_io_meta_write_bits_way_en; // @[dcache.scala:460:21]
wire [21:0] _mshrs_io_meta_write_bits_tag; // @[dcache.scala:460:21]
wire [1:0] _mshrs_io_meta_write_bits_data_coh_state; // @[dcache.scala:460:21]
wire [21:0] _mshrs_io_meta_write_bits_data_tag; // @[dcache.scala:460:21]
wire _mshrs_io_meta_read_valid; // @[dcache.scala:460:21]
wire [3:0] _mshrs_io_meta_read_bits_idx; // @[dcache.scala:460:21]
wire [1:0] _mshrs_io_meta_read_bits_way_en; // @[dcache.scala:460:21]
wire [21:0] _mshrs_io_meta_read_bits_tag; // @[dcache.scala:460:21]
wire _mshrs_io_replay_valid; // @[dcache.scala:460:21]
wire [4:0] _mshrs_io_replay_bits_uop_mem_cmd; // @[dcache.scala:460:21]
wire [33:0] _mshrs_io_replay_bits_addr; // @[dcache.scala:460:21]
wire [1:0] _mshrs_io_replay_bits_way_en; // @[dcache.scala:460:21]
wire _mshrs_io_wb_req_valid; // @[dcache.scala:460:21]
wire [21:0] _mshrs_io_wb_req_bits_tag; // @[dcache.scala:460:21]
wire [3:0] _mshrs_io_wb_req_bits_idx; // @[dcache.scala:460:21]
wire [3:0] _mshrs_io_wb_req_bits_source; // @[dcache.scala:460:21]
wire [2:0] _mshrs_io_wb_req_bits_param; // @[dcache.scala:460:21]
wire [1:0] _mshrs_io_wb_req_bits_way_en; // @[dcache.scala:460:21]
wire _mshrs_io_fence_rdy; // @[dcache.scala:460:21]
wire _mshrs_io_probe_rdy; // @[dcache.scala:460:21]
wire _prober_io_req_ready; // @[dcache.scala:459:22]
wire _prober_io_rep_valid; // @[dcache.scala:459:22]
wire [2:0] _prober_io_rep_bits_param; // @[dcache.scala:459:22]
wire [3:0] _prober_io_rep_bits_size; // @[dcache.scala:459:22]
wire [3:0] _prober_io_rep_bits_source; // @[dcache.scala:459:22]
wire [31:0] _prober_io_rep_bits_address; // @[dcache.scala:459:22]
wire _prober_io_meta_read_valid; // @[dcache.scala:459:22]
wire [3:0] _prober_io_meta_read_bits_idx; // @[dcache.scala:459:22]
wire [21:0] _prober_io_meta_read_bits_tag; // @[dcache.scala:459:22]
wire _prober_io_meta_write_valid; // @[dcache.scala:459:22]
wire [3:0] _prober_io_meta_write_bits_idx; // @[dcache.scala:459:22]
wire [1:0] _prober_io_meta_write_bits_way_en; // @[dcache.scala:459:22]
wire [21:0] _prober_io_meta_write_bits_tag; // @[dcache.scala:459:22]
wire [1:0] _prober_io_meta_write_bits_data_coh_state; // @[dcache.scala:459:22]
wire [21:0] _prober_io_meta_write_bits_data_tag; // @[dcache.scala:459:22]
wire _prober_io_wb_req_valid; // @[dcache.scala:459:22]
wire [21:0] _prober_io_wb_req_bits_tag; // @[dcache.scala:459:22]
wire [3:0] _prober_io_wb_req_bits_idx; // @[dcache.scala:459:22]
wire [3:0] _prober_io_wb_req_bits_source; // @[dcache.scala:459:22]
wire [2:0] _prober_io_wb_req_bits_param; // @[dcache.scala:459:22]
wire [1:0] _prober_io_wb_req_bits_way_en; // @[dcache.scala:459:22]
wire _prober_io_mshr_wb_rdy; // @[dcache.scala:459:22]
wire _prober_io_lsu_release_valid; // @[dcache.scala:459:22]
wire [2:0] _prober_io_lsu_release_bits_param; // @[dcache.scala:459:22]
wire [3:0] _prober_io_lsu_release_bits_size; // @[dcache.scala:459:22]
wire [3:0] _prober_io_lsu_release_bits_source; // @[dcache.scala:459:22]
wire [31:0] _prober_io_lsu_release_bits_address; // @[dcache.scala:459:22]
wire _prober_io_state_valid; // @[dcache.scala:459:22]
wire [33:0] _prober_io_state_bits; // @[dcache.scala:459:22]
wire _wb_io_req_ready; // @[dcache.scala:458:18]
wire _wb_io_meta_read_valid; // @[dcache.scala:458:18]
wire [3:0] _wb_io_meta_read_bits_idx; // @[dcache.scala:458:18]
wire [21:0] _wb_io_meta_read_bits_tag; // @[dcache.scala:458:18]
wire _wb_io_resp; // @[dcache.scala:458:18]
wire _wb_io_idx_valid; // @[dcache.scala:458:18]
wire [3:0] _wb_io_idx_bits; // @[dcache.scala:458:18]
wire _wb_io_data_req_valid; // @[dcache.scala:458:18]
wire [1:0] _wb_io_data_req_bits_way_en; // @[dcache.scala:458:18]
wire [9:0] _wb_io_data_req_bits_addr; // @[dcache.scala:458:18]
wire _wb_io_release_valid; // @[dcache.scala:458:18]
wire [2:0] _wb_io_release_bits_opcode; // @[dcache.scala:458:18]
wire [2:0] _wb_io_release_bits_param; // @[dcache.scala:458:18]
wire [3:0] _wb_io_release_bits_source; // @[dcache.scala:458:18]
wire [31:0] _wb_io_release_bits_address; // @[dcache.scala:458:18]
wire [63:0] _wb_io_release_bits_data; // @[dcache.scala:458:18]
wire _wb_io_lsu_release_valid; // @[dcache.scala:458:18]
wire [2:0] _wb_io_lsu_release_bits_param; // @[dcache.scala:458:18]
wire [3:0] _wb_io_lsu_release_bits_source; // @[dcache.scala:458:18]
wire [31:0] _wb_io_lsu_release_bits_address; // @[dcache.scala:458:18]
wire [63:0] _wb_io_lsu_release_bits_data; // @[dcache.scala:458:18]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[dcache.scala:438:7]
wire auto_out_b_valid_0 = auto_out_b_valid; // @[dcache.scala:438:7]
wire [2:0] auto_out_b_bits_opcode_0 = auto_out_b_bits_opcode; // @[dcache.scala:438:7]
wire [1:0] auto_out_b_bits_param_0 = auto_out_b_bits_param; // @[dcache.scala:438:7]
wire [3:0] auto_out_b_bits_size_0 = auto_out_b_bits_size; // @[dcache.scala:438:7]
wire [3:0] auto_out_b_bits_source_0 = auto_out_b_bits_source; // @[dcache.scala:438:7]
wire [31:0] auto_out_b_bits_address_0 = auto_out_b_bits_address; // @[dcache.scala:438:7]
wire [7:0] auto_out_b_bits_mask_0 = auto_out_b_bits_mask; // @[dcache.scala:438:7]
wire [63:0] auto_out_b_bits_data_0 = auto_out_b_bits_data; // @[dcache.scala:438:7]
wire auto_out_b_bits_corrupt_0 = auto_out_b_bits_corrupt; // @[dcache.scala:438:7]
wire auto_out_c_ready_0 = auto_out_c_ready; // @[dcache.scala:438:7]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[dcache.scala:438:7]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[dcache.scala:438:7]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[dcache.scala:438:7]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[dcache.scala:438:7]
wire [3:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[dcache.scala:438:7]
wire [2:0] auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[dcache.scala:438:7]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[dcache.scala:438:7]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[dcache.scala:438:7]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[dcache.scala:438:7]
wire auto_out_e_ready_0 = auto_out_e_ready; // @[dcache.scala:438:7]
wire io_lsu_req_valid_0 = io_lsu_req_valid; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_valid_0 = io_lsu_req_bits_0_valid; // @[dcache.scala:438:7]
wire [31:0] io_lsu_req_bits_0_bits_uop_inst_0 = io_lsu_req_bits_0_bits_uop_inst; // @[dcache.scala:438:7]
wire [31:0] io_lsu_req_bits_0_bits_uop_debug_inst_0 = io_lsu_req_bits_0_bits_uop_debug_inst; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_rvc_0 = io_lsu_req_bits_0_bits_uop_is_rvc; // @[dcache.scala:438:7]
wire [33:0] io_lsu_req_bits_0_bits_uop_debug_pc_0 = io_lsu_req_bits_0_bits_uop_debug_pc; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iq_type_0_0 = io_lsu_req_bits_0_bits_uop_iq_type_0; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iq_type_1_0 = io_lsu_req_bits_0_bits_uop_iq_type_1; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iq_type_2_0 = io_lsu_req_bits_0_bits_uop_iq_type_2; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iq_type_3_0 = io_lsu_req_bits_0_bits_uop_iq_type_3; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_0_0 = io_lsu_req_bits_0_bits_uop_fu_code_0; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_1_0 = io_lsu_req_bits_0_bits_uop_fu_code_1; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_2_0 = io_lsu_req_bits_0_bits_uop_fu_code_2; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_3_0 = io_lsu_req_bits_0_bits_uop_fu_code_3; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_4_0 = io_lsu_req_bits_0_bits_uop_fu_code_4; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_5_0 = io_lsu_req_bits_0_bits_uop_fu_code_5; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_6_0 = io_lsu_req_bits_0_bits_uop_fu_code_6; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_7_0 = io_lsu_req_bits_0_bits_uop_fu_code_7; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_8_0 = io_lsu_req_bits_0_bits_uop_fu_code_8; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fu_code_9_0 = io_lsu_req_bits_0_bits_uop_fu_code_9; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iw_issued_0 = io_lsu_req_bits_0_bits_uop_iw_issued; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iw_issued_partial_agen_0 = io_lsu_req_bits_0_bits_uop_iw_issued_partial_agen; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iw_issued_partial_dgen_0 = io_lsu_req_bits_0_bits_uop_iw_issued_partial_dgen; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iw_p1_speculative_child_0 = io_lsu_req_bits_0_bits_uop_iw_p1_speculative_child; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iw_p2_speculative_child_0 = io_lsu_req_bits_0_bits_uop_iw_p2_speculative_child; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iw_p1_bypass_hint_0 = io_lsu_req_bits_0_bits_uop_iw_p1_bypass_hint; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iw_p2_bypass_hint_0 = io_lsu_req_bits_0_bits_uop_iw_p2_bypass_hint; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_iw_p3_bypass_hint_0 = io_lsu_req_bits_0_bits_uop_iw_p3_bypass_hint; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_dis_col_sel_0 = io_lsu_req_bits_0_bits_uop_dis_col_sel; // @[dcache.scala:438:7]
wire [3:0] io_lsu_req_bits_0_bits_uop_br_mask_0 = io_lsu_req_bits_0_bits_uop_br_mask; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_br_tag_0 = io_lsu_req_bits_0_bits_uop_br_tag; // @[dcache.scala:438:7]
wire [3:0] io_lsu_req_bits_0_bits_uop_br_type_0 = io_lsu_req_bits_0_bits_uop_br_type; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_sfb_0 = io_lsu_req_bits_0_bits_uop_is_sfb; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_fence_0 = io_lsu_req_bits_0_bits_uop_is_fence; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_fencei_0 = io_lsu_req_bits_0_bits_uop_is_fencei; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_sfence_0 = io_lsu_req_bits_0_bits_uop_is_sfence; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_amo_0 = io_lsu_req_bits_0_bits_uop_is_amo; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_eret_0 = io_lsu_req_bits_0_bits_uop_is_eret; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_sys_pc2epc_0 = io_lsu_req_bits_0_bits_uop_is_sys_pc2epc; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_rocc_0 = io_lsu_req_bits_0_bits_uop_is_rocc; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_mov_0 = io_lsu_req_bits_0_bits_uop_is_mov; // @[dcache.scala:438:7]
wire [3:0] io_lsu_req_bits_0_bits_uop_ftq_idx_0 = io_lsu_req_bits_0_bits_uop_ftq_idx; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_edge_inst_0 = io_lsu_req_bits_0_bits_uop_edge_inst; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_pc_lob_0 = io_lsu_req_bits_0_bits_uop_pc_lob; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_taken_0 = io_lsu_req_bits_0_bits_uop_taken; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_imm_rename_0 = io_lsu_req_bits_0_bits_uop_imm_rename; // @[dcache.scala:438:7]
wire [2:0] io_lsu_req_bits_0_bits_uop_imm_sel_0 = io_lsu_req_bits_0_bits_uop_imm_sel; // @[dcache.scala:438:7]
wire [4:0] io_lsu_req_bits_0_bits_uop_pimm_0 = io_lsu_req_bits_0_bits_uop_pimm; // @[dcache.scala:438:7]
wire [19:0] io_lsu_req_bits_0_bits_uop_imm_packed_0 = io_lsu_req_bits_0_bits_uop_imm_packed; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_op1_sel_0 = io_lsu_req_bits_0_bits_uop_op1_sel; // @[dcache.scala:438:7]
wire [2:0] io_lsu_req_bits_0_bits_uop_op2_sel_0 = io_lsu_req_bits_0_bits_uop_op2_sel; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_ldst_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_ldst; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_wen_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_wen; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_ren1_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_ren1; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_ren2_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_ren2; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_ren3_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_ren3; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_swap12_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_swap12; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_swap23_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_swap23; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_fp_ctrl_typeTagIn_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_typeTagIn; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_fp_ctrl_typeTagOut_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_typeTagOut; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_fromint_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_fromint; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_toint_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_toint; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_fastpipe_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_fastpipe; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_fma_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_fma; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_div_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_div; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_sqrt_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_sqrt; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_wflags_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_wflags; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_ctrl_vec_0 = io_lsu_req_bits_0_bits_uop_fp_ctrl_vec; // @[dcache.scala:438:7]
wire [4:0] io_lsu_req_bits_0_bits_uop_rob_idx_0 = io_lsu_req_bits_0_bits_uop_rob_idx; // @[dcache.scala:438:7]
wire [3:0] io_lsu_req_bits_0_bits_uop_ldq_idx_0 = io_lsu_req_bits_0_bits_uop_ldq_idx; // @[dcache.scala:438:7]
wire [3:0] io_lsu_req_bits_0_bits_uop_stq_idx_0 = io_lsu_req_bits_0_bits_uop_stq_idx; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_rxq_idx_0 = io_lsu_req_bits_0_bits_uop_rxq_idx; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_pdst_0 = io_lsu_req_bits_0_bits_uop_pdst; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_prs1_0 = io_lsu_req_bits_0_bits_uop_prs1; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_prs2_0 = io_lsu_req_bits_0_bits_uop_prs2; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_prs3_0 = io_lsu_req_bits_0_bits_uop_prs3; // @[dcache.scala:438:7]
wire [3:0] io_lsu_req_bits_0_bits_uop_ppred_0 = io_lsu_req_bits_0_bits_uop_ppred; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_prs1_busy_0 = io_lsu_req_bits_0_bits_uop_prs1_busy; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_prs2_busy_0 = io_lsu_req_bits_0_bits_uop_prs2_busy; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_prs3_busy_0 = io_lsu_req_bits_0_bits_uop_prs3_busy; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_ppred_busy_0 = io_lsu_req_bits_0_bits_uop_ppred_busy; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_stale_pdst_0 = io_lsu_req_bits_0_bits_uop_stale_pdst; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_exception_0 = io_lsu_req_bits_0_bits_uop_exception; // @[dcache.scala:438:7]
wire [63:0] io_lsu_req_bits_0_bits_uop_exc_cause_0 = io_lsu_req_bits_0_bits_uop_exc_cause; // @[dcache.scala:438:7]
wire [4:0] io_lsu_req_bits_0_bits_uop_mem_cmd_0 = io_lsu_req_bits_0_bits_uop_mem_cmd; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_mem_size_0 = io_lsu_req_bits_0_bits_uop_mem_size; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_mem_signed_0 = io_lsu_req_bits_0_bits_uop_mem_signed; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_uses_ldq_0 = io_lsu_req_bits_0_bits_uop_uses_ldq; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_uses_stq_0 = io_lsu_req_bits_0_bits_uop_uses_stq; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_is_unique_0 = io_lsu_req_bits_0_bits_uop_is_unique; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_flush_on_commit_0 = io_lsu_req_bits_0_bits_uop_flush_on_commit; // @[dcache.scala:438:7]
wire [2:0] io_lsu_req_bits_0_bits_uop_csr_cmd_0 = io_lsu_req_bits_0_bits_uop_csr_cmd; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_ldst_is_rs1_0 = io_lsu_req_bits_0_bits_uop_ldst_is_rs1; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_ldst_0 = io_lsu_req_bits_0_bits_uop_ldst; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_lrs1_0 = io_lsu_req_bits_0_bits_uop_lrs1; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_lrs2_0 = io_lsu_req_bits_0_bits_uop_lrs2; // @[dcache.scala:438:7]
wire [5:0] io_lsu_req_bits_0_bits_uop_lrs3_0 = io_lsu_req_bits_0_bits_uop_lrs3; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_dst_rtype_0 = io_lsu_req_bits_0_bits_uop_dst_rtype; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_lrs1_rtype_0 = io_lsu_req_bits_0_bits_uop_lrs1_rtype; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_lrs2_rtype_0 = io_lsu_req_bits_0_bits_uop_lrs2_rtype; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_frs3_en_0 = io_lsu_req_bits_0_bits_uop_frs3_en; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fcn_dw_0 = io_lsu_req_bits_0_bits_uop_fcn_dw; // @[dcache.scala:438:7]
wire [4:0] io_lsu_req_bits_0_bits_uop_fcn_op_0 = io_lsu_req_bits_0_bits_uop_fcn_op; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_fp_val_0 = io_lsu_req_bits_0_bits_uop_fp_val; // @[dcache.scala:438:7]
wire [2:0] io_lsu_req_bits_0_bits_uop_fp_rm_0 = io_lsu_req_bits_0_bits_uop_fp_rm; // @[dcache.scala:438:7]
wire [1:0] io_lsu_req_bits_0_bits_uop_fp_typ_0 = io_lsu_req_bits_0_bits_uop_fp_typ; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_xcpt_pf_if_0 = io_lsu_req_bits_0_bits_uop_xcpt_pf_if; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_xcpt_ae_if_0 = io_lsu_req_bits_0_bits_uop_xcpt_ae_if; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_xcpt_ma_if_0 = io_lsu_req_bits_0_bits_uop_xcpt_ma_if; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_bp_debug_if_0 = io_lsu_req_bits_0_bits_uop_bp_debug_if; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_uop_bp_xcpt_if_0 = io_lsu_req_bits_0_bits_uop_bp_xcpt_if; // @[dcache.scala:438:7]
wire [2:0] io_lsu_req_bits_0_bits_uop_debug_fsrc_0 = io_lsu_req_bits_0_bits_uop_debug_fsrc; // @[dcache.scala:438:7]
wire [2:0] io_lsu_req_bits_0_bits_uop_debug_tsrc_0 = io_lsu_req_bits_0_bits_uop_debug_tsrc; // @[dcache.scala:438:7]
wire [33:0] io_lsu_req_bits_0_bits_addr_0 = io_lsu_req_bits_0_bits_addr; // @[dcache.scala:438:7]
wire [63:0] io_lsu_req_bits_0_bits_data_0 = io_lsu_req_bits_0_bits_data; // @[dcache.scala:438:7]
wire io_lsu_req_bits_0_bits_is_hella_0 = io_lsu_req_bits_0_bits_is_hella; // @[dcache.scala:438:7]
wire io_lsu_s1_kill_0_0 = io_lsu_s1_kill_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_ready_0 = io_lsu_ll_resp_ready; // @[dcache.scala:438:7]
wire [4:0] io_lsu_rob_pnr_idx_0 = io_lsu_rob_pnr_idx; // @[dcache.scala:438:7]
wire [4:0] io_lsu_rob_head_idx_0 = io_lsu_rob_head_idx; // @[dcache.scala:438:7]
wire io_lsu_release_ready_0 = io_lsu_release_ready; // @[dcache.scala:438:7]
wire io_lsu_force_order_0 = io_lsu_force_order; // @[dcache.scala:438:7]
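  // Constant tie-offs: the single-bit wires below are driven to 1'h0.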
wire auto_out_a_bits_corrupt = 1'h0; // @[dcache.scala:438:7]
wire auto_out_c_bits_corrupt = 1'h0; // @[dcache.scala:438:7]
wire io_errors_bus_valid = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_s1_nack_advisory_0 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_rvc = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iq_type_0 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iq_type_1 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iq_type_2 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iq_type_3 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_0 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_1 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_2 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_3 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_4 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_5 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_6 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_7 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_8 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fu_code_9 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iw_issued = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iw_issued_partial_agen = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iw_issued_partial_dgen = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iw_p1_speculative_child = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iw_p2_speculative_child = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iw_p1_bypass_hint = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iw_p2_bypass_hint = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_iw_p3_bypass_hint = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_dis_col_sel = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_sfb = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_fence = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_fencei = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_sfence = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_amo = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_eret = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_rocc = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_mov = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_edge_inst = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_taken = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_imm_rename = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_ldst = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_wen = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_ren1 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_ren2 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_ren3 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_swap12 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_swap23 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_fromint = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_toint = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_fastpipe = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_fma = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_div = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_sqrt = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_wflags = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_ctrl_vec = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_prs1_busy = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_prs2_busy = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_prs3_busy = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_ppred_busy = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_exception = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_mem_signed = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_uses_ldq = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_uses_stq = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_is_unique = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_flush_on_commit = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_frs3_en = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fcn_dw = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_fp_val = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_bp_debug_if = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_mispredict = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_brupdate_b2_taken = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_exception = 1'h0; // @[dcache.scala:438:7]
wire io_lsu_release_bits_corrupt = 1'h0; // @[dcache.scala:438:7]
wire nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire nodeOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire singlePortedDCacheWrite = 1'h0; // @[dcache.scala:503:53]
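  // mshr_read_req: micro-op and control fields held at constant zero.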
wire mshr_read_req_0_uop_is_rvc = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iq_type_0 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iq_type_1 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iq_type_2 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iq_type_3 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_0 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_1 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_2 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_3 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_4 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_5 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_6 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_7 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_8 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fu_code_9 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iw_issued = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iw_issued_partial_agen = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iw_issued_partial_dgen = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iw_p1_speculative_child = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iw_p2_speculative_child = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iw_p1_bypass_hint = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iw_p2_bypass_hint = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_iw_p3_bypass_hint = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_dis_col_sel = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_sfb = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_fence = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_fencei = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_sfence = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_amo = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_eret = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_rocc = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_mov = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_edge_inst = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_taken = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_imm_rename = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_ldst = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_wen = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_ren1 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_ren2 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_ren3 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_swap12 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_swap23 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_fromint = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_toint = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_fastpipe = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_fma = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_div = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_sqrt = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_wflags = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_ctrl_vec = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_prs1_busy = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_prs2_busy = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_prs3_busy = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_ppred_busy = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_exception = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_mem_signed = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_uses_ldq = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_uses_stq = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_is_unique = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_flush_on_commit = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_frs3_en = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fcn_dw = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_fp_val = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_bp_debug_if = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:549:27]
wire mshr_read_req_0_is_hella = 1'h0; // @[dcache.scala:549:27]
wire _mshr_read_req_0_uop_WIRE_is_rvc = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iq_type_0 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iq_type_1 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iq_type_2 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iq_type_3 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_0 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_1 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_2 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_3 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_4 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_5 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_6 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_7 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_8 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fu_code_9 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iw_issued = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iw_issued_partial_agen = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iw_issued_partial_dgen = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iw_p1_speculative_child = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iw_p2_speculative_child = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iw_p1_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iw_p2_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_iw_p3_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_dis_col_sel = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_sfb = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_fence = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_fencei = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_sfence = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_amo = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_eret = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_sys_pc2epc = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_rocc = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_mov = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_edge_inst = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_taken = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_imm_rename = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_ldst = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_wen = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_ren1 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_ren2 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_ren3 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_swap12 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_swap23 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_fromint = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_toint = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_fastpipe = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_fma = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_div = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_sqrt = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_wflags = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_ctrl_vec = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_prs1_busy = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_prs2_busy = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_prs3_busy = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_ppred_busy = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_exception = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_mem_signed = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_uses_ldq = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_uses_stq = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_is_unique = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_flush_on_commit = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_ldst_is_rs1 = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_frs3_en = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fcn_dw = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_fp_val = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_xcpt_pf_if = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_xcpt_ae_if = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_xcpt_ma_if = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_bp_debug_if = 1'h0; // @[consts.scala:141:57]
wire _mshr_read_req_0_uop_WIRE_bp_xcpt_if = 1'h0; // @[consts.scala:141:57]
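  // wb_req: micro-op fields held at constant zero.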
wire wb_req_0_uop_is_rvc = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iq_type_0 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iq_type_1 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iq_type_2 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iq_type_3 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_0 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_1 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_2 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_3 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_4 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_5 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_6 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_7 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_8 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fu_code_9 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iw_issued = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iw_issued_partial_agen = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iw_issued_partial_dgen = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iw_p1_speculative_child = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iw_p2_speculative_child = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iw_p1_bypass_hint = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iw_p2_bypass_hint = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_iw_p3_bypass_hint = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_dis_col_sel = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_sfb = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_fence = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_fencei = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_sfence = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_amo = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_eret = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_rocc = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_mov = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_edge_inst = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_taken = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_imm_rename = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_ldst = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_wen = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_ren1 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_ren2 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_ren3 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_swap12 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_swap23 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_fromint = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_toint = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_fastpipe = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_fma = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_div = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_sqrt = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_wflags = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_ctrl_vec = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_prs1_busy = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_prs2_busy = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_prs3_busy = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_ppred_busy = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_exception = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_mem_signed = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_uses_ldq = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_uses_stq = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_is_unique = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_flush_on_commit = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_frs3_en = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fcn_dw = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_fp_val = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_bp_debug_if = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:564:20]
wire wb_req_0_is_hella = 1'h0; // @[dcache.scala:564:20]
wire _wb_req_0_uop_WIRE_is_rvc = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iq_type_0 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iq_type_1 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iq_type_2 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iq_type_3 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_0 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_1 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_2 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_3 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_4 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_5 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_6 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_7 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_8 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fu_code_9 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iw_issued = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iw_issued_partial_agen = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iw_issued_partial_dgen = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iw_p1_speculative_child = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iw_p2_speculative_child = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iw_p1_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iw_p2_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_iw_p3_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_dis_col_sel = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_sfb = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_fence = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_fencei = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_sfence = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_amo = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_eret = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_sys_pc2epc = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_rocc = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_mov = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_edge_inst = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_taken = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_imm_rename = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_ldst = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_wen = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_ren1 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_ren2 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_ren3 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_swap12 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_swap23 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_fromint = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_toint = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_fastpipe = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_fma = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_div = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_sqrt = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_wflags = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_ctrl_vec = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_prs1_busy = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_prs2_busy = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_prs3_busy = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_ppred_busy = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_exception = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_mem_signed = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_uses_ldq = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_uses_stq = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_is_unique = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_flush_on_commit = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_ldst_is_rs1 = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_frs3_en = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fcn_dw = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_fp_val = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_xcpt_pf_if = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_xcpt_ae_if = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_xcpt_ma_if = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_bp_debug_if = 1'h0; // @[consts.scala:141:57]
wire _wb_req_0_uop_WIRE_bp_xcpt_if = 1'h0; // @[consts.scala:141:57]
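  // prober_req: micro-op fields held at constant zero.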
wire prober_req_0_uop_is_rvc = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iq_type_0 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iq_type_1 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iq_type_2 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iq_type_3 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_0 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_1 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_2 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_3 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_4 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_5 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_6 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_7 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_8 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fu_code_9 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iw_issued = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iw_issued_partial_agen = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iw_issued_partial_dgen = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iw_p1_speculative_child = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iw_p2_speculative_child = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iw_p1_bypass_hint = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iw_p2_bypass_hint = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_iw_p3_bypass_hint = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_dis_col_sel = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_sfb = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_fence = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_fencei = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_sfence = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_amo = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_eret = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_rocc = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_mov = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_edge_inst = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_taken = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_imm_rename = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_ldst = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_wen = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_ren1 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_ren2 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_ren3 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_swap12 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_swap23 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_fromint = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_toint = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_fastpipe = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_fma = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_div = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_sqrt = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_wflags = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_ctrl_vec = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_prs1_busy = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_prs2_busy = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_prs3_busy = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_ppred_busy = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_exception = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_mem_signed = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_uses_ldq = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_uses_stq = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_is_unique = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_flush_on_commit = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_frs3_en = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fcn_dw = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_fp_val = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_bp_debug_if = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:586:26]
wire prober_req_0_is_hella = 1'h0; // @[dcache.scala:586:26]
wire _prober_req_0_uop_WIRE_is_rvc = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iq_type_0 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iq_type_1 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iq_type_2 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iq_type_3 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_0 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_1 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_2 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_3 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_4 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_5 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_6 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_7 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_8 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fu_code_9 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iw_issued = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iw_issued_partial_agen = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iw_issued_partial_dgen = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iw_p1_speculative_child = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iw_p2_speculative_child = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iw_p1_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iw_p2_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_iw_p3_bypass_hint = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_dis_col_sel = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_sfb = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_fence = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_fencei = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_sfence = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_amo = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_eret = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_sys_pc2epc = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_rocc = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_mov = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_edge_inst = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_taken = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_imm_rename = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_ldst = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_wen = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_ren1 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_ren2 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_ren3 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_swap12 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_swap23 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_fromint = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_toint = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_fastpipe = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_fma = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_div = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_sqrt = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_wflags = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_ctrl_vec = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_prs1_busy = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_prs2_busy = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_prs3_busy = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_ppred_busy = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_exception = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_mem_signed = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_uses_ldq = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_uses_stq = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_is_unique = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_flush_on_commit = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_ldst_is_rs1 = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_frs3_en = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fcn_dw = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_fp_val = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_xcpt_pf_if = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_xcpt_ae_if = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_xcpt_ma_if = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_bp_debug_if = 1'h0; // @[consts.scala:141:57]
wire _prober_req_0_uop_WIRE_bp_xcpt_if = 1'h0; // @[consts.scala:141:57]
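  // prefetch_fire and prefetch_req fields held at constant zero.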
wire prefetch_fire = 1'h0; // @[Decoupled.scala:51:35]
wire prefetch_req_0_uop_is_rvc = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iq_type_0 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iq_type_1 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iq_type_2 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iq_type_3 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_0 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_1 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_2 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_3 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_4 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_5 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_6 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_7 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_8 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fu_code_9 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iw_issued = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iw_issued_partial_agen = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iw_issued_partial_dgen = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iw_p1_speculative_child = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iw_p2_speculative_child = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iw_p1_bypass_hint = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iw_p2_bypass_hint = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_iw_p3_bypass_hint = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_dis_col_sel = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_sfb = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_fence = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_fencei = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_sfence = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_amo = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_eret = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_rocc = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_mov = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_edge_inst = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_taken = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_imm_rename = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_ldst = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_wen = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_ren1 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_ren2 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_ren3 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_swap12 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_swap23 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_fromint = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_toint = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_fastpipe = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_fma = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_div = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_sqrt = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_wflags = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_ctrl_vec = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_prs1_busy = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_prs2_busy = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_prs3_busy = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_ppred_busy = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_exception = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_mem_signed = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_uses_ldq = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_uses_stq = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_is_unique = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_flush_on_commit = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_frs3_en = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fcn_dw = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_fp_val = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_bp_debug_if = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:601:27]
wire prefetch_req_0_is_hella = 1'h0; // @[dcache.scala:601:27]
wire _s0_valid_WIRE_2_0 = 1'h0; // @[dcache.scala:614:82]
wire _s1_valid_T_1 = 1'h0; // @[util.scala:126:59]
wire _s1_valid_T_2 = 1'h0; // @[util.scala:61:61]
wire _s1_valid_T_5 = 1'h0; // @[dcache.scala:640:45]
wire _s2_valid_T_3 = 1'h0; // @[util.scala:126:59]
wire _s2_valid_T_4 = 1'h0; // @[util.scala:61:61]
wire _s2_valid_T_7 = 1'h0; // @[dcache.scala:676:45]
wire _s2_has_permission_r_T_26 = 1'h0; // @[Misc.scala:35:9]
wire _s2_has_permission_r_T_29 = 1'h0; // @[Misc.scala:35:9]
wire _s2_has_permission_r_T_32 = 1'h0; // @[Misc.scala:35:9]
wire _s2_has_permission_r_T_35 = 1'h0; // @[Misc.scala:35:9]
wire _s2_has_permission_r_T_38 = 1'h0; // @[Misc.scala:35:9]
wire _s2_new_hit_state_r_T_26 = 1'h0; // @[Misc.scala:35:9]
wire _s2_new_hit_state_r_T_29 = 1'h0; // @[Misc.scala:35:9]
wire _s2_new_hit_state_r_T_32 = 1'h0; // @[Misc.scala:35:9]
wire _s2_new_hit_state_r_T_35 = 1'h0; // @[Misc.scala:35:9]
wire _s2_new_hit_state_r_T_38 = 1'h0; // @[Misc.scala:35:9]
wire s2_word_idx_0 = 1'h0; // @[dcache.scala:454:49]
wire _s2_nack_data_T = 1'h0; // @[dcache.scala:766:50]
wire s2_nack_data_0 = 1'h0; // @[dcache.scala:454:49]
wire _mshrs_io_req_0_valid_T_14 = 1'h0; // @[dcache.scala:798:48]
wire opdata_1 = 1'h0; // @[Edges.scala:102:36]
wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34]
wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34]
wire _nodeOut_c_bits_WIRE_corrupt = 1'h0; // @[Mux.scala:30:73]
wire _nodeOut_c_bits_T = 1'h0; // @[Mux.scala:30:73]
wire _nodeOut_c_bits_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _nodeOut_c_bits_T_2 = 1'h0; // @[Mux.scala:30:73]
wire _nodeOut_c_bits_WIRE_1 = 1'h0; // @[Mux.scala:30:73]
wire io_lsu_resp_0_bits_data_doZero = 1'h0; // @[AMOALU.scala:43:31]
wire io_lsu_resp_0_bits_data_doZero_1 = 1'h0; // @[AMOALU.scala:43:31]
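  // Constant-one tie-offs: the wires below are driven to 1'h1.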
wire _mshrs_io_replay_ready_T_1 = 1'h1; // @[dcache.scala:534:91]
wire _metaReadArb_io_in_0_valid_T = 1'h1; // @[dcache.scala:537:71]
wire _dataReadArb_io_in_0_valid_T = 1'h1; // @[dcache.scala:542:71]
wire _metaReadArb_io_in_2_valid_T = 1'h1; // @[dcache.scala:573:65]
wire _wb_io_meta_read_ready_T_1 = 1'h1; // @[dcache.scala:575:88]
wire _dataReadArb_io_in_1_valid_T = 1'h1; // @[dcache.scala:577:64]
wire _wb_io_data_req_ready_T_1 = 1'h1; // @[dcache.scala:580:88]
wire _s0_valid_WIRE_1_0 = 1'h1; // @[dcache.scala:614:48]
wire _s1_valid_T_3 = 1'h1; // @[dcache.scala:639:26]
wire _s1_valid_T_6 = 1'h1; // @[dcache.scala:640:26]
wire _s2_valid_T_5 = 1'h1; // @[dcache.scala:675:26]
wire _s2_valid_T_8 = 1'h1; // @[dcache.scala:676:26]
wire _mshrs_io_req_0_valid_T_6 = 1'h1; // @[dcache.scala:795:29]
wire _mshrs_io_req_0_valid_T_15 = 1'h1; // @[dcache.scala:798:29]
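  // Wider constant-zero tie-offs (32-, 4-, 34-, and 2-bit fields) follow.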
wire [31:0] io_errors_bus_bits = 32'h0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_brupdate_b2_uop_inst = 32'h0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_brupdate_b2_uop_debug_inst = 32'h0; // @[dcache.scala:438:7]
wire [31:0] mshr_read_req_0_uop_inst = 32'h0; // @[dcache.scala:549:27]
wire [31:0] mshr_read_req_0_uop_debug_inst = 32'h0; // @[dcache.scala:549:27]
wire [31:0] _mshr_read_req_0_uop_WIRE_inst = 32'h0; // @[consts.scala:141:57]
wire [31:0] _mshr_read_req_0_uop_WIRE_debug_inst = 32'h0; // @[consts.scala:141:57]
wire [31:0] wb_req_0_uop_inst = 32'h0; // @[dcache.scala:564:20]
wire [31:0] wb_req_0_uop_debug_inst = 32'h0; // @[dcache.scala:564:20]
wire [31:0] _wb_req_0_uop_WIRE_inst = 32'h0; // @[consts.scala:141:57]
wire [31:0] _wb_req_0_uop_WIRE_debug_inst = 32'h0; // @[consts.scala:141:57]
wire [31:0] prober_req_0_uop_inst = 32'h0; // @[dcache.scala:586:26]
wire [31:0] prober_req_0_uop_debug_inst = 32'h0; // @[dcache.scala:586:26]
wire [31:0] _prober_req_0_uop_WIRE_inst = 32'h0; // @[consts.scala:141:57]
wire [31:0] _prober_req_0_uop_WIRE_debug_inst = 32'h0; // @[consts.scala:141:57]
wire [31:0] prefetch_req_0_uop_inst = 32'h0; // @[dcache.scala:601:27]
wire [31:0] prefetch_req_0_uop_debug_inst = 32'h0; // @[dcache.scala:601:27]
wire [3:0] io_lsu_brupdate_b1_resolve_mask = 4'h0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_brupdate_b1_mispredict_mask = 4'h0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_brupdate_b2_uop_br_mask = 4'h0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_brupdate_b2_uop_br_type = 4'h0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_brupdate_b2_uop_ftq_idx = 4'h0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_brupdate_b2_uop_ldq_idx = 4'h0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_brupdate_b2_uop_stq_idx = 4'h0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_brupdate_b2_uop_ppred = 4'h0; // @[dcache.scala:438:7]
wire [3:0] mshr_read_req_0_uop_br_mask = 4'h0; // @[dcache.scala:549:27]
wire [3:0] mshr_read_req_0_uop_br_type = 4'h0; // @[dcache.scala:549:27]
wire [3:0] mshr_read_req_0_uop_ftq_idx = 4'h0; // @[dcache.scala:549:27]
wire [3:0] mshr_read_req_0_uop_ldq_idx = 4'h0; // @[dcache.scala:549:27]
wire [3:0] mshr_read_req_0_uop_stq_idx = 4'h0; // @[dcache.scala:549:27]
wire [3:0] mshr_read_req_0_uop_ppred = 4'h0; // @[dcache.scala:549:27]
wire [3:0] _mshr_read_req_0_uop_WIRE_br_mask = 4'h0; // @[consts.scala:141:57]
wire [3:0] _mshr_read_req_0_uop_WIRE_br_type = 4'h0; // @[consts.scala:141:57]
wire [3:0] _mshr_read_req_0_uop_WIRE_ftq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _mshr_read_req_0_uop_WIRE_ldq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _mshr_read_req_0_uop_WIRE_stq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _mshr_read_req_0_uop_WIRE_ppred = 4'h0; // @[consts.scala:141:57]
wire [3:0] wb_req_0_uop_br_mask = 4'h0; // @[dcache.scala:564:20]
wire [3:0] wb_req_0_uop_br_type = 4'h0; // @[dcache.scala:564:20]
wire [3:0] wb_req_0_uop_ftq_idx = 4'h0; // @[dcache.scala:564:20]
wire [3:0] wb_req_0_uop_ldq_idx = 4'h0; // @[dcache.scala:564:20]
wire [3:0] wb_req_0_uop_stq_idx = 4'h0; // @[dcache.scala:564:20]
wire [3:0] wb_req_0_uop_ppred = 4'h0; // @[dcache.scala:564:20]
wire [3:0] _wb_req_0_uop_WIRE_br_mask = 4'h0; // @[consts.scala:141:57]
wire [3:0] _wb_req_0_uop_WIRE_br_type = 4'h0; // @[consts.scala:141:57]
wire [3:0] _wb_req_0_uop_WIRE_ftq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _wb_req_0_uop_WIRE_ldq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _wb_req_0_uop_WIRE_stq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _wb_req_0_uop_WIRE_ppred = 4'h0; // @[consts.scala:141:57]
wire [3:0] prober_req_0_uop_br_mask = 4'h0; // @[dcache.scala:586:26]
wire [3:0] prober_req_0_uop_br_type = 4'h0; // @[dcache.scala:586:26]
wire [3:0] prober_req_0_uop_ftq_idx = 4'h0; // @[dcache.scala:586:26]
wire [3:0] prober_req_0_uop_ldq_idx = 4'h0; // @[dcache.scala:586:26]
wire [3:0] prober_req_0_uop_stq_idx = 4'h0; // @[dcache.scala:586:26]
wire [3:0] prober_req_0_uop_ppred = 4'h0; // @[dcache.scala:586:26]
wire [3:0] _prober_req_0_uop_WIRE_br_mask = 4'h0; // @[consts.scala:141:57]
wire [3:0] _prober_req_0_uop_WIRE_br_type = 4'h0; // @[consts.scala:141:57]
wire [3:0] _prober_req_0_uop_WIRE_ftq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _prober_req_0_uop_WIRE_ldq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _prober_req_0_uop_WIRE_stq_idx = 4'h0; // @[consts.scala:141:57]
wire [3:0] _prober_req_0_uop_WIRE_ppred = 4'h0; // @[consts.scala:141:57]
wire [3:0] prefetch_req_0_uop_br_mask = 4'h0; // @[dcache.scala:601:27]
wire [3:0] prefetch_req_0_uop_br_type = 4'h0; // @[dcache.scala:601:27]
wire [3:0] prefetch_req_0_uop_ftq_idx = 4'h0; // @[dcache.scala:601:27]
wire [3:0] prefetch_req_0_uop_ldq_idx = 4'h0; // @[dcache.scala:601:27]
wire [3:0] prefetch_req_0_uop_stq_idx = 4'h0; // @[dcache.scala:601:27]
wire [3:0] prefetch_req_0_uop_ppred = 4'h0; // @[dcache.scala:601:27]
wire [3:0] _s1_valid_T = 4'h0; // @[util.scala:126:51]
wire [3:0] _s2_valid_T_2 = 4'h0; // @[util.scala:126:51]
wire [3:0] _s2_has_permission_r_T_16 = 4'h0; // @[Metadata.scala:68:10]
wire [3:0] _s2_new_hit_state_r_T_16 = 4'h0; // @[Metadata.scala:68:10]
wire [33:0] io_lsu_brupdate_b2_uop_debug_pc = 34'h0; // @[dcache.scala:438:7]
wire [33:0] io_lsu_brupdate_b2_jalr_target = 34'h0; // @[dcache.scala:438:7]
wire [33:0] mshr_read_req_0_uop_debug_pc = 34'h0; // @[dcache.scala:549:27]
wire [33:0] _mshr_read_req_0_uop_WIRE_debug_pc = 34'h0; // @[consts.scala:141:57]
wire [33:0] wb_req_0_uop_debug_pc = 34'h0; // @[dcache.scala:564:20]
wire [33:0] _wb_req_0_uop_WIRE_debug_pc = 34'h0; // @[consts.scala:141:57]
wire [33:0] prober_req_0_uop_debug_pc = 34'h0; // @[dcache.scala:586:26]
wire [33:0] _prober_req_0_uop_WIRE_debug_pc = 34'h0; // @[consts.scala:141:57]
wire [33:0] prefetch_req_0_uop_debug_pc = 34'h0; // @[dcache.scala:601:27]
wire [33:0] prefetch_req_0_addr = 34'h0; // @[dcache.scala:601:27]
wire [1:0] io_lsu_brupdate_b2_uop_br_tag = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_op1_sel = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_fp_ctrl_typeTagIn = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_fp_ctrl_typeTagOut = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_rxq_idx = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_mem_size = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_dst_rtype = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_lrs1_rtype = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_lrs2_rtype = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_uop_fp_typ = 2'h0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_brupdate_b2_pc_sel = 2'h0; // @[dcache.scala:438:7]
wire [1:0] mshr_read_req_0_uop_br_tag = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_op1_sel = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_fp_ctrl_typeTagIn = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_fp_ctrl_typeTagOut = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_rxq_idx = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_mem_size = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_dst_rtype = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_lrs1_rtype = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_lrs2_rtype = 2'h0; // @[dcache.scala:549:27]
wire [1:0] mshr_read_req_0_uop_fp_typ = 2'h0; // @[dcache.scala:549:27]
wire [1:0] _mshr_read_req_0_uop_WIRE_br_tag = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_op1_sel = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_fp_ctrl_typeTagIn = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_fp_ctrl_typeTagOut = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_rxq_idx = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_mem_size = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_dst_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_lrs1_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_lrs2_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _mshr_read_req_0_uop_WIRE_fp_typ = 2'h0; // @[consts.scala:141:57]
wire [1:0] wb_req_0_uop_br_tag = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_op1_sel = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_fp_ctrl_typeTagIn = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_fp_ctrl_typeTagOut = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_rxq_idx = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_mem_size = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_dst_rtype = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_lrs1_rtype = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_lrs2_rtype = 2'h0; // @[dcache.scala:564:20]
wire [1:0] wb_req_0_uop_fp_typ = 2'h0; // @[dcache.scala:564:20]
wire [1:0] _wb_req_0_uop_WIRE_br_tag = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_op1_sel = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_fp_ctrl_typeTagIn = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_fp_ctrl_typeTagOut = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_rxq_idx = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_mem_size = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_dst_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_lrs1_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_lrs2_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _wb_req_0_uop_WIRE_fp_typ = 2'h0; // @[consts.scala:141:57]
wire [1:0] prober_req_0_uop_br_tag = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_op1_sel = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_fp_ctrl_typeTagIn = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_fp_ctrl_typeTagOut = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_rxq_idx = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_mem_size = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_dst_rtype = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_lrs1_rtype = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_lrs2_rtype = 2'h0; // @[dcache.scala:586:26]
wire [1:0] prober_req_0_uop_fp_typ = 2'h0; // @[dcache.scala:586:26]
wire [1:0] _prober_req_0_uop_WIRE_br_tag = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_op1_sel = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_fp_ctrl_typeTagIn = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_fp_ctrl_typeTagOut = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_rxq_idx = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_mem_size = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_dst_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_lrs1_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_lrs2_rtype = 2'h0; // @[consts.scala:141:57]
wire [1:0] _prober_req_0_uop_WIRE_fp_typ = 2'h0; // @[consts.scala:141:57]
wire [1:0] prefetch_req_0_uop_br_tag = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_op1_sel = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_fp_ctrl_typeTagIn = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_fp_ctrl_typeTagOut = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_rxq_idx = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_mem_size = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_dst_rtype = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_lrs1_rtype = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_lrs2_rtype = 2'h0; // @[dcache.scala:601:27]
wire [1:0] prefetch_req_0_uop_fp_typ = 2'h0; // @[dcache.scala:601:27]
wire [1:0] _s2_has_permission_r_T_1 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _s2_has_permission_r_T_3 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _s2_has_permission_r_T_5 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _s2_has_permission_r_T_15 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _s2_new_hit_state_r_T_1 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _s2_new_hit_state_r_T_3 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _s2_new_hit_state_r_T_5 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _s2_new_hit_state_r_T_15 = 2'h0; // @[Metadata.scala:26:15]
wire [5:0] io_lsu_brupdate_b2_uop_pc_lob = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_pdst = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_prs1 = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_prs2 = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_prs3 = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_stale_pdst = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_ldst = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_lrs1 = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_lrs2 = 6'h0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_brupdate_b2_uop_lrs3 = 6'h0; // @[dcache.scala:438:7]
wire [5:0] mshr_read_req_0_uop_pc_lob = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_pdst = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_prs1 = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_prs2 = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_prs3 = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_stale_pdst = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_ldst = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_lrs1 = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_lrs2 = 6'h0; // @[dcache.scala:549:27]
wire [5:0] mshr_read_req_0_uop_lrs3 = 6'h0; // @[dcache.scala:549:27]
wire [5:0] _mshr_read_req_0_uop_WIRE_pc_lob = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_pdst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_prs1 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_prs2 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_prs3 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_stale_pdst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_ldst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_lrs1 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_lrs2 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _mshr_read_req_0_uop_WIRE_lrs3 = 6'h0; // @[consts.scala:141:57]
wire [5:0] wb_req_0_uop_pc_lob = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_pdst = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_prs1 = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_prs2 = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_prs3 = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_stale_pdst = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_ldst = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_lrs1 = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_lrs2 = 6'h0; // @[dcache.scala:564:20]
wire [5:0] wb_req_0_uop_lrs3 = 6'h0; // @[dcache.scala:564:20]
wire [5:0] _wb_req_0_uop_WIRE_pc_lob = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_pdst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_prs1 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_prs2 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_prs3 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_stale_pdst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_ldst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_lrs1 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_lrs2 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _wb_req_0_uop_WIRE_lrs3 = 6'h0; // @[consts.scala:141:57]
wire [5:0] prober_req_0_uop_pc_lob = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_pdst = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_prs1 = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_prs2 = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_prs3 = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_stale_pdst = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_ldst = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_lrs1 = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_lrs2 = 6'h0; // @[dcache.scala:586:26]
wire [5:0] prober_req_0_uop_lrs3 = 6'h0; // @[dcache.scala:586:26]
wire [5:0] _prober_req_0_uop_WIRE_pc_lob = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_pdst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_prs1 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_prs2 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_prs3 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_stale_pdst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_ldst = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_lrs1 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_lrs2 = 6'h0; // @[consts.scala:141:57]
wire [5:0] _prober_req_0_uop_WIRE_lrs3 = 6'h0; // @[consts.scala:141:57]
wire [5:0] prefetch_req_0_uop_pc_lob = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_pdst = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_prs1 = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_prs2 = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_prs3 = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_stale_pdst = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_ldst = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_lrs1 = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_lrs2 = 6'h0; // @[dcache.scala:601:27]
wire [5:0] prefetch_req_0_uop_lrs3 = 6'h0; // @[dcache.scala:601:27]
wire [2:0] io_lsu_brupdate_b2_uop_imm_sel = 3'h0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_brupdate_b2_uop_op2_sel = 3'h0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_brupdate_b2_uop_csr_cmd = 3'h0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_brupdate_b2_uop_fp_rm = 3'h0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_brupdate_b2_uop_debug_fsrc = 3'h0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_brupdate_b2_uop_debug_tsrc = 3'h0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_brupdate_b2_cfi_type = 3'h0; // @[dcache.scala:438:7]
wire [2:0] mshr_read_req_0_uop_imm_sel = 3'h0; // @[dcache.scala:549:27]
wire [2:0] mshr_read_req_0_uop_op2_sel = 3'h0; // @[dcache.scala:549:27]
wire [2:0] mshr_read_req_0_uop_csr_cmd = 3'h0; // @[dcache.scala:549:27]
wire [2:0] mshr_read_req_0_uop_fp_rm = 3'h0; // @[dcache.scala:549:27]
wire [2:0] mshr_read_req_0_uop_debug_fsrc = 3'h0; // @[dcache.scala:549:27]
wire [2:0] mshr_read_req_0_uop_debug_tsrc = 3'h0; // @[dcache.scala:549:27]
wire [2:0] _mshr_read_req_0_uop_WIRE_imm_sel = 3'h0; // @[consts.scala:141:57]
wire [2:0] _mshr_read_req_0_uop_WIRE_op2_sel = 3'h0; // @[consts.scala:141:57]
wire [2:0] _mshr_read_req_0_uop_WIRE_csr_cmd = 3'h0; // @[consts.scala:141:57]
wire [2:0] _mshr_read_req_0_uop_WIRE_fp_rm = 3'h0; // @[consts.scala:141:57]
wire [2:0] _mshr_read_req_0_uop_WIRE_debug_fsrc = 3'h0; // @[consts.scala:141:57]
wire [2:0] _mshr_read_req_0_uop_WIRE_debug_tsrc = 3'h0; // @[consts.scala:141:57]
wire [2:0] wb_req_0_uop_imm_sel = 3'h0; // @[dcache.scala:564:20]
wire [2:0] wb_req_0_uop_op2_sel = 3'h0; // @[dcache.scala:564:20]
wire [2:0] wb_req_0_uop_csr_cmd = 3'h0; // @[dcache.scala:564:20]
wire [2:0] wb_req_0_uop_fp_rm = 3'h0; // @[dcache.scala:564:20]
wire [2:0] wb_req_0_uop_debug_fsrc = 3'h0; // @[dcache.scala:564:20]
wire [2:0] wb_req_0_uop_debug_tsrc = 3'h0; // @[dcache.scala:564:20]
wire [2:0] _wb_req_0_uop_WIRE_imm_sel = 3'h0; // @[consts.scala:141:57]
wire [2:0] _wb_req_0_uop_WIRE_op2_sel = 3'h0; // @[consts.scala:141:57]
wire [2:0] _wb_req_0_uop_WIRE_csr_cmd = 3'h0; // @[consts.scala:141:57]
wire [2:0] _wb_req_0_uop_WIRE_fp_rm = 3'h0; // @[consts.scala:141:57]
wire [2:0] _wb_req_0_uop_WIRE_debug_fsrc = 3'h0; // @[consts.scala:141:57]
wire [2:0] _wb_req_0_uop_WIRE_debug_tsrc = 3'h0; // @[consts.scala:141:57]
wire [2:0] prober_req_0_uop_imm_sel = 3'h0; // @[dcache.scala:586:26]
wire [2:0] prober_req_0_uop_op2_sel = 3'h0; // @[dcache.scala:586:26]
wire [2:0] prober_req_0_uop_csr_cmd = 3'h0; // @[dcache.scala:586:26]
wire [2:0] prober_req_0_uop_fp_rm = 3'h0; // @[dcache.scala:586:26]
wire [2:0] prober_req_0_uop_debug_fsrc = 3'h0; // @[dcache.scala:586:26]
wire [2:0] prober_req_0_uop_debug_tsrc = 3'h0; // @[dcache.scala:586:26]
wire [2:0] _prober_req_0_uop_WIRE_imm_sel = 3'h0; // @[consts.scala:141:57]
wire [2:0] _prober_req_0_uop_WIRE_op2_sel = 3'h0; // @[consts.scala:141:57]
wire [2:0] _prober_req_0_uop_WIRE_csr_cmd = 3'h0; // @[consts.scala:141:57]
wire [2:0] _prober_req_0_uop_WIRE_fp_rm = 3'h0; // @[consts.scala:141:57]
wire [2:0] _prober_req_0_uop_WIRE_debug_fsrc = 3'h0; // @[consts.scala:141:57]
wire [2:0] _prober_req_0_uop_WIRE_debug_tsrc = 3'h0; // @[consts.scala:141:57]
wire [2:0] prefetch_req_0_uop_imm_sel = 3'h0; // @[dcache.scala:601:27]
wire [2:0] prefetch_req_0_uop_op2_sel = 3'h0; // @[dcache.scala:601:27]
wire [2:0] prefetch_req_0_uop_csr_cmd = 3'h0; // @[dcache.scala:601:27]
wire [2:0] prefetch_req_0_uop_fp_rm = 3'h0; // @[dcache.scala:601:27]
wire [2:0] prefetch_req_0_uop_debug_fsrc = 3'h0; // @[dcache.scala:601:27]
wire [2:0] prefetch_req_0_uop_debug_tsrc = 3'h0; // @[dcache.scala:601:27]
wire [4:0] io_lsu_brupdate_b2_uop_pimm = 5'h0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_brupdate_b2_uop_rob_idx = 5'h0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_brupdate_b2_uop_mem_cmd = 5'h0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_brupdate_b2_uop_fcn_op = 5'h0; // @[dcache.scala:438:7]
wire [4:0] mshr_read_req_0_uop_pimm = 5'h0; // @[dcache.scala:549:27]
wire [4:0] mshr_read_req_0_uop_rob_idx = 5'h0; // @[dcache.scala:549:27]
wire [4:0] mshr_read_req_0_uop_mem_cmd = 5'h0; // @[dcache.scala:549:27]
wire [4:0] mshr_read_req_0_uop_fcn_op = 5'h0; // @[dcache.scala:549:27]
wire [4:0] _mshr_read_req_0_uop_WIRE_pimm = 5'h0; // @[consts.scala:141:57]
wire [4:0] _mshr_read_req_0_uop_WIRE_rob_idx = 5'h0; // @[consts.scala:141:57]
wire [4:0] _mshr_read_req_0_uop_WIRE_mem_cmd = 5'h0; // @[consts.scala:141:57]
wire [4:0] _mshr_read_req_0_uop_WIRE_fcn_op = 5'h0; // @[consts.scala:141:57]
wire [4:0] wb_req_0_uop_pimm = 5'h0; // @[dcache.scala:564:20]
wire [4:0] wb_req_0_uop_rob_idx = 5'h0; // @[dcache.scala:564:20]
wire [4:0] wb_req_0_uop_mem_cmd = 5'h0; // @[dcache.scala:564:20]
wire [4:0] wb_req_0_uop_fcn_op = 5'h0; // @[dcache.scala:564:20]
wire [4:0] _wb_req_0_uop_WIRE_pimm = 5'h0; // @[consts.scala:141:57]
wire [4:0] _wb_req_0_uop_WIRE_rob_idx = 5'h0; // @[consts.scala:141:57]
wire [4:0] _wb_req_0_uop_WIRE_mem_cmd = 5'h0; // @[consts.scala:141:57]
wire [4:0] _wb_req_0_uop_WIRE_fcn_op = 5'h0; // @[consts.scala:141:57]
wire [4:0] prober_req_0_uop_pimm = 5'h0; // @[dcache.scala:586:26]
wire [4:0] prober_req_0_uop_rob_idx = 5'h0; // @[dcache.scala:586:26]
wire [4:0] prober_req_0_uop_mem_cmd = 5'h0; // @[dcache.scala:586:26]
wire [4:0] prober_req_0_uop_fcn_op = 5'h0; // @[dcache.scala:586:26]
wire [4:0] _prober_req_0_uop_WIRE_pimm = 5'h0; // @[consts.scala:141:57]
wire [4:0] _prober_req_0_uop_WIRE_rob_idx = 5'h0; // @[consts.scala:141:57]
wire [4:0] _prober_req_0_uop_WIRE_mem_cmd = 5'h0; // @[consts.scala:141:57]
wire [4:0] _prober_req_0_uop_WIRE_fcn_op = 5'h0; // @[consts.scala:141:57]
wire [4:0] prefetch_req_0_uop_pimm = 5'h0; // @[dcache.scala:601:27]
wire [4:0] prefetch_req_0_uop_rob_idx = 5'h0; // @[dcache.scala:601:27]
wire [4:0] prefetch_req_0_uop_mem_cmd = 5'h0; // @[dcache.scala:601:27]
wire [4:0] prefetch_req_0_uop_fcn_op = 5'h0; // @[dcache.scala:601:27]
wire [19:0] io_lsu_brupdate_b2_uop_imm_packed = 20'h0; // @[dcache.scala:438:7]
wire [19:0] mshr_read_req_0_uop_imm_packed = 20'h0; // @[dcache.scala:549:27]
wire [19:0] _mshr_read_req_0_uop_WIRE_imm_packed = 20'h0; // @[consts.scala:141:57]
wire [19:0] wb_req_0_uop_imm_packed = 20'h0; // @[dcache.scala:564:20]
wire [19:0] _wb_req_0_uop_WIRE_imm_packed = 20'h0; // @[consts.scala:141:57]
wire [19:0] prober_req_0_uop_imm_packed = 20'h0; // @[dcache.scala:586:26]
wire [19:0] _prober_req_0_uop_WIRE_imm_packed = 20'h0; // @[consts.scala:141:57]
wire [19:0] prefetch_req_0_uop_imm_packed = 20'h0; // @[dcache.scala:601:27]
wire [63:0] io_lsu_brupdate_b2_uop_exc_cause = 64'h0; // @[dcache.scala:438:7]
wire [63:0] mshr_read_req_0_uop_exc_cause = 64'h0; // @[dcache.scala:549:27]
wire [63:0] mshr_read_req_0_data = 64'h0; // @[dcache.scala:549:27]
wire [63:0] _mshr_read_req_0_uop_WIRE_exc_cause = 64'h0; // @[consts.scala:141:57]
wire [63:0] wb_req_0_uop_exc_cause = 64'h0; // @[dcache.scala:564:20]
wire [63:0] wb_req_0_data = 64'h0; // @[dcache.scala:564:20]
wire [63:0] _wb_req_0_uop_WIRE_exc_cause = 64'h0; // @[consts.scala:141:57]
wire [63:0] prober_req_0_uop_exc_cause = 64'h0; // @[dcache.scala:586:26]
wire [63:0] prober_req_0_data = 64'h0; // @[dcache.scala:586:26]
wire [63:0] _prober_req_0_uop_WIRE_exc_cause = 64'h0; // @[consts.scala:141:57]
wire [63:0] prefetch_req_0_uop_exc_cause = 64'h0; // @[dcache.scala:601:27]
wire [63:0] prefetch_req_0_data = 64'h0; // @[dcache.scala:601:27]
wire [63:0] _nodeOut_c_bits_T_4 = 64'h0; // @[Mux.scala:30:73]
wire [20:0] io_lsu_brupdate_b2_target_offset = 21'h0; // @[dcache.scala:438:7]
wire [6:0] _s2_data_word_prebypass_T = 7'h0; // @[dcache.scala:868:69]
wire [8:0] maskedBeats_1 = 9'h0; // @[Arbiter.scala:82:69]
wire [8:0] decode = 9'h7; // @[Edges.scala:220:59]
wire [11:0] _decode_T_2 = 12'h3F; // @[package.scala:243:46]
wire [11:0] _decode_T_1 = 12'hFC0; // @[package.scala:243:76]
wire [26:0] _decode_T = 27'h3FFC0; // @[package.scala:243:71]
wire [3:0] _s2_has_permission_r_T_10 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] _s2_new_hit_state_r_T_10 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] _s2_has_permission_r_T_24 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _s2_new_hit_state_r_T_24 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _s2_has_permission_r_T_22 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _s2_new_hit_state_r_T_22 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _s2_has_permission_r_T_20 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _s2_new_hit_state_r_T_20 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _s2_has_permission_r_T_18 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _s2_new_hit_state_r_T_18 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _s2_has_permission_r_T_14 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _s2_new_hit_state_r_T_14 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _s1_req_0_uop_br_mask_T = 4'hF; // @[util.scala:93:27]
wire [3:0] _s2_req_0_uop_br_mask_T = 4'hF; // @[util.scala:93:27]
wire [3:0] _s2_has_permission_r_T_12 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] _s2_new_hit_state_r_T_12 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] _s2_has_permission_r_T_8 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _s2_new_hit_state_r_T_8 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _s2_has_permission_r_T_6 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] _s2_new_hit_state_r_T_6 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] _s2_has_permission_r_T_4 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _s2_new_hit_state_r_T_4 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _s2_has_permission_r_T_2 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _s2_new_hit_state_r_T_2 = 4'h3; // @[Metadata.scala:60:10]
wire [27:0] _metaReadArb_io_in_5_bits_req_0_idx_T = 28'h0; // @[dcache.scala:606:74]
wire [1:0] _s2_has_permission_r_T_7 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _s2_has_permission_r_T_9 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _s2_has_permission_r_T_17 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _s2_has_permission_r_T_19 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _s2_new_hit_state_r_T_7 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _s2_new_hit_state_r_T_9 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _s2_new_hit_state_r_T_17 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _s2_new_hit_state_r_T_19 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _dataWriteArb_io_in_0_bits_wmask_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _dataReadArb_io_in_2_bits_req_0_way_en_T = 2'h3; // @[dcache.scala:522:48]
wire [1:0] _s2_has_permission_r_T_11 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _s2_has_permission_r_T_13 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _s2_has_permission_r_T_21 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _s2_has_permission_r_T_23 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _s2_new_hit_state_r_T_11 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _s2_new_hit_state_r_T_13 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _s2_new_hit_state_r_T_21 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _s2_new_hit_state_r_T_23 = 2'h3; // @[Metadata.scala:24:15]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[MixedNode.scala:542:17]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_b_ready; // @[MixedNode.scala:542:17]
wire nodeOut_b_valid = auto_out_b_valid_0; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_b_bits_opcode = auto_out_b_bits_opcode_0; // @[MixedNode.scala:542:17]
wire [1:0] nodeOut_b_bits_param = auto_out_b_bits_param_0; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_b_bits_size = auto_out_b_bits_size_0; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_b_bits_source = auto_out_b_bits_source_0; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_b_bits_address = auto_out_b_bits_address_0; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_b_bits_mask = auto_out_b_bits_mask_0; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_b_bits_data = auto_out_b_bits_data_0; // @[MixedNode.scala:542:17]
wire nodeOut_b_bits_corrupt = auto_out_b_bits_corrupt_0; // @[MixedNode.scala:542:17]
wire nodeOut_c_ready = auto_out_c_ready_0; // @[MixedNode.scala:542:17]
wire nodeOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_c_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_c_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[MixedNode.scala:542:17]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[MixedNode.scala:542:17]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[MixedNode.scala:542:17]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[MixedNode.scala:542:17]
wire nodeOut_e_ready = auto_out_e_ready_0; // @[MixedNode.scala:542:17]
wire nodeOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire _io_lsu_req_ready_T_2; // @[dcache.scala:511:80]
wire _s0_valid_WIRE_0 = io_lsu_req_bits_0_valid_0; // @[dcache.scala:438:7, :612:46]
wire [31:0] _s0_req_WIRE_0_uop_inst = io_lsu_req_bits_0_bits_uop_inst_0; // @[dcache.scala:438:7, :615:56]
wire [31:0] _s0_req_WIRE_0_uop_debug_inst = io_lsu_req_bits_0_bits_uop_debug_inst_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_rvc = io_lsu_req_bits_0_bits_uop_is_rvc_0; // @[dcache.scala:438:7, :615:56]
wire [33:0] _s0_req_WIRE_0_uop_debug_pc = io_lsu_req_bits_0_bits_uop_debug_pc_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iq_type_0 = io_lsu_req_bits_0_bits_uop_iq_type_0_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iq_type_1 = io_lsu_req_bits_0_bits_uop_iq_type_1_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iq_type_2 = io_lsu_req_bits_0_bits_uop_iq_type_2_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iq_type_3 = io_lsu_req_bits_0_bits_uop_iq_type_3_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_0 = io_lsu_req_bits_0_bits_uop_fu_code_0_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_1 = io_lsu_req_bits_0_bits_uop_fu_code_1_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_2 = io_lsu_req_bits_0_bits_uop_fu_code_2_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_3 = io_lsu_req_bits_0_bits_uop_fu_code_3_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_4 = io_lsu_req_bits_0_bits_uop_fu_code_4_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_5 = io_lsu_req_bits_0_bits_uop_fu_code_5_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_6 = io_lsu_req_bits_0_bits_uop_fu_code_6_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_7 = io_lsu_req_bits_0_bits_uop_fu_code_7_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_8 = io_lsu_req_bits_0_bits_uop_fu_code_8_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fu_code_9 = io_lsu_req_bits_0_bits_uop_fu_code_9_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iw_issued = io_lsu_req_bits_0_bits_uop_iw_issued_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iw_issued_partial_agen = io_lsu_req_bits_0_bits_uop_iw_issued_partial_agen_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iw_issued_partial_dgen = io_lsu_req_bits_0_bits_uop_iw_issued_partial_dgen_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iw_p1_speculative_child = io_lsu_req_bits_0_bits_uop_iw_p1_speculative_child_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iw_p2_speculative_child = io_lsu_req_bits_0_bits_uop_iw_p2_speculative_child_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iw_p1_bypass_hint = io_lsu_req_bits_0_bits_uop_iw_p1_bypass_hint_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iw_p2_bypass_hint = io_lsu_req_bits_0_bits_uop_iw_p2_bypass_hint_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_iw_p3_bypass_hint = io_lsu_req_bits_0_bits_uop_iw_p3_bypass_hint_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_dis_col_sel = io_lsu_req_bits_0_bits_uop_dis_col_sel_0; // @[dcache.scala:438:7, :615:56]
wire [3:0] _s0_req_WIRE_0_uop_br_mask = io_lsu_req_bits_0_bits_uop_br_mask_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_br_tag = io_lsu_req_bits_0_bits_uop_br_tag_0; // @[dcache.scala:438:7, :615:56]
wire [3:0] _s0_req_WIRE_0_uop_br_type = io_lsu_req_bits_0_bits_uop_br_type_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_sfb = io_lsu_req_bits_0_bits_uop_is_sfb_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_fence = io_lsu_req_bits_0_bits_uop_is_fence_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_fencei = io_lsu_req_bits_0_bits_uop_is_fencei_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_sfence = io_lsu_req_bits_0_bits_uop_is_sfence_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_amo = io_lsu_req_bits_0_bits_uop_is_amo_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_eret = io_lsu_req_bits_0_bits_uop_is_eret_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_sys_pc2epc = io_lsu_req_bits_0_bits_uop_is_sys_pc2epc_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_rocc = io_lsu_req_bits_0_bits_uop_is_rocc_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_mov = io_lsu_req_bits_0_bits_uop_is_mov_0; // @[dcache.scala:438:7, :615:56]
wire [3:0] _s0_req_WIRE_0_uop_ftq_idx = io_lsu_req_bits_0_bits_uop_ftq_idx_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_edge_inst = io_lsu_req_bits_0_bits_uop_edge_inst_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_pc_lob = io_lsu_req_bits_0_bits_uop_pc_lob_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_taken = io_lsu_req_bits_0_bits_uop_taken_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_imm_rename = io_lsu_req_bits_0_bits_uop_imm_rename_0; // @[dcache.scala:438:7, :615:56]
wire [2:0] _s0_req_WIRE_0_uop_imm_sel = io_lsu_req_bits_0_bits_uop_imm_sel_0; // @[dcache.scala:438:7, :615:56]
wire [4:0] _s0_req_WIRE_0_uop_pimm = io_lsu_req_bits_0_bits_uop_pimm_0; // @[dcache.scala:438:7, :615:56]
wire [19:0] _s0_req_WIRE_0_uop_imm_packed = io_lsu_req_bits_0_bits_uop_imm_packed_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_op1_sel = io_lsu_req_bits_0_bits_uop_op1_sel_0; // @[dcache.scala:438:7, :615:56]
wire [2:0] _s0_req_WIRE_0_uop_op2_sel = io_lsu_req_bits_0_bits_uop_op2_sel_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_ldst = io_lsu_req_bits_0_bits_uop_fp_ctrl_ldst_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_wen = io_lsu_req_bits_0_bits_uop_fp_ctrl_wen_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_ren1 = io_lsu_req_bits_0_bits_uop_fp_ctrl_ren1_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_ren2 = io_lsu_req_bits_0_bits_uop_fp_ctrl_ren2_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_ren3 = io_lsu_req_bits_0_bits_uop_fp_ctrl_ren3_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_swap12 = io_lsu_req_bits_0_bits_uop_fp_ctrl_swap12_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_swap23 = io_lsu_req_bits_0_bits_uop_fp_ctrl_swap23_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_fp_ctrl_typeTagIn = io_lsu_req_bits_0_bits_uop_fp_ctrl_typeTagIn_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_fp_ctrl_typeTagOut = io_lsu_req_bits_0_bits_uop_fp_ctrl_typeTagOut_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_fromint = io_lsu_req_bits_0_bits_uop_fp_ctrl_fromint_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_toint = io_lsu_req_bits_0_bits_uop_fp_ctrl_toint_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_fastpipe = io_lsu_req_bits_0_bits_uop_fp_ctrl_fastpipe_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_fma = io_lsu_req_bits_0_bits_uop_fp_ctrl_fma_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_div = io_lsu_req_bits_0_bits_uop_fp_ctrl_div_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_sqrt = io_lsu_req_bits_0_bits_uop_fp_ctrl_sqrt_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_wflags = io_lsu_req_bits_0_bits_uop_fp_ctrl_wflags_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_ctrl_vec = io_lsu_req_bits_0_bits_uop_fp_ctrl_vec_0; // @[dcache.scala:438:7, :615:56]
wire [4:0] _s0_req_WIRE_0_uop_rob_idx = io_lsu_req_bits_0_bits_uop_rob_idx_0; // @[dcache.scala:438:7, :615:56]
wire [3:0] _s0_req_WIRE_0_uop_ldq_idx = io_lsu_req_bits_0_bits_uop_ldq_idx_0; // @[dcache.scala:438:7, :615:56]
wire [3:0] _s0_req_WIRE_0_uop_stq_idx = io_lsu_req_bits_0_bits_uop_stq_idx_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_rxq_idx = io_lsu_req_bits_0_bits_uop_rxq_idx_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_pdst = io_lsu_req_bits_0_bits_uop_pdst_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_prs1 = io_lsu_req_bits_0_bits_uop_prs1_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_prs2 = io_lsu_req_bits_0_bits_uop_prs2_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_prs3 = io_lsu_req_bits_0_bits_uop_prs3_0; // @[dcache.scala:438:7, :615:56]
wire [3:0] _s0_req_WIRE_0_uop_ppred = io_lsu_req_bits_0_bits_uop_ppred_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_prs1_busy = io_lsu_req_bits_0_bits_uop_prs1_busy_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_prs2_busy = io_lsu_req_bits_0_bits_uop_prs2_busy_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_prs3_busy = io_lsu_req_bits_0_bits_uop_prs3_busy_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_ppred_busy = io_lsu_req_bits_0_bits_uop_ppred_busy_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_stale_pdst = io_lsu_req_bits_0_bits_uop_stale_pdst_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_exception = io_lsu_req_bits_0_bits_uop_exception_0; // @[dcache.scala:438:7, :615:56]
wire [63:0] _s0_req_WIRE_0_uop_exc_cause = io_lsu_req_bits_0_bits_uop_exc_cause_0; // @[dcache.scala:438:7, :615:56]
wire [4:0] _s0_req_WIRE_0_uop_mem_cmd = io_lsu_req_bits_0_bits_uop_mem_cmd_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_mem_size = io_lsu_req_bits_0_bits_uop_mem_size_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_mem_signed = io_lsu_req_bits_0_bits_uop_mem_signed_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_uses_ldq = io_lsu_req_bits_0_bits_uop_uses_ldq_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_uses_stq = io_lsu_req_bits_0_bits_uop_uses_stq_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_is_unique = io_lsu_req_bits_0_bits_uop_is_unique_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_flush_on_commit = io_lsu_req_bits_0_bits_uop_flush_on_commit_0; // @[dcache.scala:438:7, :615:56]
wire [2:0] _s0_req_WIRE_0_uop_csr_cmd = io_lsu_req_bits_0_bits_uop_csr_cmd_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_ldst_is_rs1 = io_lsu_req_bits_0_bits_uop_ldst_is_rs1_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_ldst = io_lsu_req_bits_0_bits_uop_ldst_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_lrs1 = io_lsu_req_bits_0_bits_uop_lrs1_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_lrs2 = io_lsu_req_bits_0_bits_uop_lrs2_0; // @[dcache.scala:438:7, :615:56]
wire [5:0] _s0_req_WIRE_0_uop_lrs3 = io_lsu_req_bits_0_bits_uop_lrs3_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_dst_rtype = io_lsu_req_bits_0_bits_uop_dst_rtype_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_lrs1_rtype = io_lsu_req_bits_0_bits_uop_lrs1_rtype_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_lrs2_rtype = io_lsu_req_bits_0_bits_uop_lrs2_rtype_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_frs3_en = io_lsu_req_bits_0_bits_uop_frs3_en_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fcn_dw = io_lsu_req_bits_0_bits_uop_fcn_dw_0; // @[dcache.scala:438:7, :615:56]
wire [4:0] _s0_req_WIRE_0_uop_fcn_op = io_lsu_req_bits_0_bits_uop_fcn_op_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_fp_val = io_lsu_req_bits_0_bits_uop_fp_val_0; // @[dcache.scala:438:7, :615:56]
wire [2:0] _s0_req_WIRE_0_uop_fp_rm = io_lsu_req_bits_0_bits_uop_fp_rm_0; // @[dcache.scala:438:7, :615:56]
wire [1:0] _s0_req_WIRE_0_uop_fp_typ = io_lsu_req_bits_0_bits_uop_fp_typ_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_xcpt_pf_if = io_lsu_req_bits_0_bits_uop_xcpt_pf_if_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_xcpt_ae_if = io_lsu_req_bits_0_bits_uop_xcpt_ae_if_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_xcpt_ma_if = io_lsu_req_bits_0_bits_uop_xcpt_ma_if_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_bp_debug_if = io_lsu_req_bits_0_bits_uop_bp_debug_if_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_uop_bp_xcpt_if = io_lsu_req_bits_0_bits_uop_bp_xcpt_if_0; // @[dcache.scala:438:7, :615:56]
wire [2:0] _s0_req_WIRE_0_uop_debug_fsrc = io_lsu_req_bits_0_bits_uop_debug_fsrc_0; // @[dcache.scala:438:7, :615:56]
wire [2:0] _s0_req_WIRE_0_uop_debug_tsrc = io_lsu_req_bits_0_bits_uop_debug_tsrc_0; // @[dcache.scala:438:7, :615:56]
wire [33:0] _s0_req_WIRE_0_addr = io_lsu_req_bits_0_bits_addr_0; // @[dcache.scala:438:7, :615:56]
wire [63:0] _s0_req_WIRE_0_data = io_lsu_req_bits_0_bits_data_0; // @[dcache.scala:438:7, :615:56]
wire _s0_req_WIRE_0_is_hella = io_lsu_req_bits_0_bits_is_hella_0; // @[dcache.scala:438:7, :615:56]
wire _io_lsu_resp_0_valid_T; // @[dcache.scala:877:41]
wire [63:0] _io_lsu_resp_0_bits_data_T_24; // @[dcache.scala:879:49]
wire _io_lsu_store_ack_0_valid_T_1; // @[dcache.scala:888:70]
wire _io_lsu_nack_0_valid_T; // @[dcache.scala:884:41]
wire _io_lsu_ordered_T_3; // @[dcache.scala:941:66]
wire io_lsu_perf_acquire_done; // @[Edges.scala:233:22]
wire io_lsu_perf_release_done; // @[Edges.scala:233:22]
wire [2:0] auto_out_a_bits_opcode_0; // @[dcache.scala:438:7]
wire [2:0] auto_out_a_bits_param_0; // @[dcache.scala:438:7]
wire [3:0] auto_out_a_bits_size_0; // @[dcache.scala:438:7]
wire [3:0] auto_out_a_bits_source_0; // @[dcache.scala:438:7]
wire [31:0] auto_out_a_bits_address_0; // @[dcache.scala:438:7]
wire [7:0] auto_out_a_bits_mask_0; // @[dcache.scala:438:7]
wire [63:0] auto_out_a_bits_data_0; // @[dcache.scala:438:7]
wire auto_out_a_valid_0; // @[dcache.scala:438:7]
wire auto_out_b_ready_0; // @[dcache.scala:438:7]
wire [2:0] auto_out_c_bits_opcode_0; // @[dcache.scala:438:7]
wire [2:0] auto_out_c_bits_param_0; // @[dcache.scala:438:7]
wire [3:0] auto_out_c_bits_size_0; // @[dcache.scala:438:7]
wire [3:0] auto_out_c_bits_source_0; // @[dcache.scala:438:7]
wire [31:0] auto_out_c_bits_address_0; // @[dcache.scala:438:7]
wire [63:0] auto_out_c_bits_data_0; // @[dcache.scala:438:7]
wire auto_out_c_valid_0; // @[dcache.scala:438:7]
wire auto_out_d_ready_0; // @[dcache.scala:438:7]
wire [2:0] auto_out_e_bits_sink_0; // @[dcache.scala:438:7]
wire auto_out_e_valid_0; // @[dcache.scala:438:7]
wire io_lsu_req_ready_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iq_type_0_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iq_type_1_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iq_type_2_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iq_type_3_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_0_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_1_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_2_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_3_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_4_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_5_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_6_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_7_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_8_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fu_code_9_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_ldst_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_wen_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_ren1_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_ren2_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_ren3_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_swap12_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_swap23_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_fp_ctrl_typeTagIn_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_fp_ctrl_typeTagOut_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_fromint_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_toint_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_fastpipe_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_fma_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_div_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_sqrt_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_wflags_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_ctrl_vec_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_resp_0_bits_uop_inst_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_resp_0_bits_uop_debug_inst_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_rvc_0; // @[dcache.scala:438:7]
wire [33:0] io_lsu_resp_0_bits_uop_debug_pc_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iw_issued_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iw_issued_partial_agen_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iw_issued_partial_dgen_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iw_p1_speculative_child_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iw_p2_speculative_child_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iw_p1_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iw_p2_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_iw_p3_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_dis_col_sel_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_resp_0_bits_uop_br_mask_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_br_tag_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_resp_0_bits_uop_br_type_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_sfb_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_fence_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_fencei_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_sfence_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_amo_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_eret_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_sys_pc2epc_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_rocc_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_mov_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_resp_0_bits_uop_ftq_idx_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_edge_inst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_pc_lob_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_taken_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_imm_rename_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_resp_0_bits_uop_imm_sel_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_resp_0_bits_uop_pimm_0; // @[dcache.scala:438:7]
wire [19:0] io_lsu_resp_0_bits_uop_imm_packed_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_op1_sel_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_resp_0_bits_uop_op2_sel_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_resp_0_bits_uop_rob_idx_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_resp_0_bits_uop_ldq_idx_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_resp_0_bits_uop_stq_idx_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_rxq_idx_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_pdst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_prs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_prs2_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_prs3_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_resp_0_bits_uop_ppred_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_prs1_busy_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_prs2_busy_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_prs3_busy_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_ppred_busy_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_stale_pdst_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_exception_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_resp_0_bits_uop_exc_cause_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_resp_0_bits_uop_mem_cmd_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_mem_size_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_mem_signed_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_uses_ldq_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_uses_stq_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_is_unique_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_flush_on_commit_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_resp_0_bits_uop_csr_cmd_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_ldst_is_rs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_ldst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_lrs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_lrs2_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_resp_0_bits_uop_lrs3_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_dst_rtype_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_lrs1_rtype_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_lrs2_rtype_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_frs3_en_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fcn_dw_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_resp_0_bits_uop_fcn_op_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_fp_val_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_resp_0_bits_uop_fp_rm_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_resp_0_bits_uop_fp_typ_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_xcpt_pf_if_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_xcpt_ae_if_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_xcpt_ma_if_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_bp_debug_if_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_uop_bp_xcpt_if_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_resp_0_bits_uop_debug_fsrc_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_resp_0_bits_uop_debug_tsrc_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_resp_0_bits_data_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_bits_is_hella_0; // @[dcache.scala:438:7]
wire io_lsu_resp_0_valid_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iq_type_0_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iq_type_1_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iq_type_2_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iq_type_3_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_0_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_1_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_2_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_3_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_4_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_5_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_6_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_7_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_8_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fu_code_9_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_ldst_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_wen_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_ren1_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_ren2_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_ren3_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_swap12_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_swap23_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_fp_ctrl_typeTagIn_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_fp_ctrl_typeTagOut_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_fromint_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_toint_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_fastpipe_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_fma_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_div_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_sqrt_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_wflags_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_ctrl_vec_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_store_ack_0_bits_uop_inst_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_store_ack_0_bits_uop_debug_inst_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_rvc_0; // @[dcache.scala:438:7]
wire [33:0] io_lsu_store_ack_0_bits_uop_debug_pc_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iw_issued_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iw_issued_partial_agen_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iw_issued_partial_dgen_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iw_p1_speculative_child_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iw_p2_speculative_child_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iw_p1_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iw_p2_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_iw_p3_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_dis_col_sel_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_store_ack_0_bits_uop_br_mask_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_br_tag_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_store_ack_0_bits_uop_br_type_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_sfb_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_fence_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_fencei_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_sfence_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_amo_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_eret_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_sys_pc2epc_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_rocc_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_mov_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_store_ack_0_bits_uop_ftq_idx_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_edge_inst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_pc_lob_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_taken_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_imm_rename_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_store_ack_0_bits_uop_imm_sel_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_store_ack_0_bits_uop_pimm_0; // @[dcache.scala:438:7]
wire [19:0] io_lsu_store_ack_0_bits_uop_imm_packed_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_op1_sel_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_store_ack_0_bits_uop_op2_sel_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_store_ack_0_bits_uop_rob_idx_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_store_ack_0_bits_uop_ldq_idx_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_store_ack_0_bits_uop_stq_idx_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_rxq_idx_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_pdst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_prs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_prs2_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_prs3_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_store_ack_0_bits_uop_ppred_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_prs1_busy_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_prs2_busy_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_prs3_busy_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_ppred_busy_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_stale_pdst_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_exception_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_store_ack_0_bits_uop_exc_cause_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_store_ack_0_bits_uop_mem_cmd_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_mem_size_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_mem_signed_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_uses_ldq_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_uses_stq_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_is_unique_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_flush_on_commit_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_store_ack_0_bits_uop_csr_cmd_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_ldst_is_rs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_ldst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_lrs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_lrs2_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_store_ack_0_bits_uop_lrs3_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_dst_rtype_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_lrs1_rtype_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_lrs2_rtype_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_frs3_en_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fcn_dw_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_store_ack_0_bits_uop_fcn_op_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_fp_val_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_store_ack_0_bits_uop_fp_rm_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_store_ack_0_bits_uop_fp_typ_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_xcpt_pf_if_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_xcpt_ae_if_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_xcpt_ma_if_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_bp_debug_if_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_uop_bp_xcpt_if_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_store_ack_0_bits_uop_debug_fsrc_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_store_ack_0_bits_uop_debug_tsrc_0; // @[dcache.scala:438:7]
wire [33:0] io_lsu_store_ack_0_bits_addr_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_store_ack_0_bits_data_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_bits_is_hella_0; // @[dcache.scala:438:7]
wire io_lsu_store_ack_0_valid_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iq_type_0_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iq_type_1_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iq_type_2_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iq_type_3_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_0_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_1_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_2_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_3_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_4_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_5_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_6_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_7_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_8_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fu_code_9_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_ldst_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_wen_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_ren1_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_ren2_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_ren3_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_swap12_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_swap23_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_fp_ctrl_typeTagIn_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_fp_ctrl_typeTagOut_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_fromint_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_toint_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_fastpipe_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_fma_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_div_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_sqrt_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_wflags_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_ctrl_vec_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_nack_0_bits_uop_inst_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_nack_0_bits_uop_debug_inst_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_rvc_0; // @[dcache.scala:438:7]
wire [33:0] io_lsu_nack_0_bits_uop_debug_pc_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iw_issued_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iw_issued_partial_agen_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iw_issued_partial_dgen_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iw_p1_speculative_child_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iw_p2_speculative_child_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iw_p1_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iw_p2_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_iw_p3_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_dis_col_sel_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_nack_0_bits_uop_br_mask_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_br_tag_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_nack_0_bits_uop_br_type_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_sfb_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_fence_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_fencei_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_sfence_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_amo_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_eret_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_sys_pc2epc_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_rocc_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_mov_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_nack_0_bits_uop_ftq_idx_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_edge_inst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_pc_lob_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_taken_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_imm_rename_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_nack_0_bits_uop_imm_sel_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_nack_0_bits_uop_pimm_0; // @[dcache.scala:438:7]
wire [19:0] io_lsu_nack_0_bits_uop_imm_packed_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_op1_sel_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_nack_0_bits_uop_op2_sel_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_nack_0_bits_uop_rob_idx_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_nack_0_bits_uop_ldq_idx_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_nack_0_bits_uop_stq_idx_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_rxq_idx_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_pdst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_prs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_prs2_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_prs3_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_nack_0_bits_uop_ppred_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_prs1_busy_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_prs2_busy_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_prs3_busy_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_ppred_busy_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_stale_pdst_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_exception_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_nack_0_bits_uop_exc_cause_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_nack_0_bits_uop_mem_cmd_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_mem_size_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_mem_signed_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_uses_ldq_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_uses_stq_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_is_unique_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_flush_on_commit_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_nack_0_bits_uop_csr_cmd_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_ldst_is_rs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_ldst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_lrs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_lrs2_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_nack_0_bits_uop_lrs3_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_dst_rtype_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_lrs1_rtype_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_lrs2_rtype_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_frs3_en_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fcn_dw_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_nack_0_bits_uop_fcn_op_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_fp_val_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_nack_0_bits_uop_fp_rm_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_nack_0_bits_uop_fp_typ_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_xcpt_pf_if_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_xcpt_ae_if_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_xcpt_ma_if_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_bp_debug_if_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_uop_bp_xcpt_if_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_nack_0_bits_uop_debug_fsrc_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_nack_0_bits_uop_debug_tsrc_0; // @[dcache.scala:438:7]
wire [33:0] io_lsu_nack_0_bits_addr_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_nack_0_bits_data_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_bits_is_hella_0; // @[dcache.scala:438:7]
wire io_lsu_nack_0_valid_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iq_type_0_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iq_type_1_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iq_type_2_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iq_type_3_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_0_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_1_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_2_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_3_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_4_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_5_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_6_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_7_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_8_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fu_code_9_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_ldst_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_wen_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_ren1_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_ren2_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_ren3_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_swap12_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_swap23_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_fp_ctrl_typeTagIn_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_fp_ctrl_typeTagOut_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_fromint_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_toint_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_fastpipe_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_fma_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_div_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_sqrt_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_wflags_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_ctrl_vec_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_ll_resp_bits_uop_inst_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_ll_resp_bits_uop_debug_inst_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_rvc_0; // @[dcache.scala:438:7]
wire [33:0] io_lsu_ll_resp_bits_uop_debug_pc_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iw_issued_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iw_issued_partial_agen_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iw_issued_partial_dgen_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iw_p1_speculative_child_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iw_p2_speculative_child_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iw_p1_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iw_p2_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_iw_p3_bypass_hint_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_dis_col_sel_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_ll_resp_bits_uop_br_mask_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_br_tag_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_ll_resp_bits_uop_br_type_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_sfb_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_fence_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_fencei_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_sfence_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_amo_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_eret_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_sys_pc2epc_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_rocc_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_mov_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_ll_resp_bits_uop_ftq_idx_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_edge_inst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_pc_lob_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_taken_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_imm_rename_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_ll_resp_bits_uop_imm_sel_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_ll_resp_bits_uop_pimm_0; // @[dcache.scala:438:7]
wire [19:0] io_lsu_ll_resp_bits_uop_imm_packed_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_op1_sel_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_ll_resp_bits_uop_op2_sel_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_ll_resp_bits_uop_rob_idx_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_ll_resp_bits_uop_ldq_idx_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_ll_resp_bits_uop_stq_idx_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_rxq_idx_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_pdst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_prs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_prs2_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_prs3_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_ll_resp_bits_uop_ppred_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_prs1_busy_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_prs2_busy_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_prs3_busy_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_ppred_busy_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_stale_pdst_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_exception_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_ll_resp_bits_uop_exc_cause_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_ll_resp_bits_uop_mem_cmd_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_mem_size_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_mem_signed_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_uses_ldq_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_uses_stq_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_is_unique_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_flush_on_commit_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_ll_resp_bits_uop_csr_cmd_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_ldst_is_rs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_ldst_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_lrs1_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_lrs2_0; // @[dcache.scala:438:7]
wire [5:0] io_lsu_ll_resp_bits_uop_lrs3_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_dst_rtype_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_lrs1_rtype_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_lrs2_rtype_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_frs3_en_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fcn_dw_0; // @[dcache.scala:438:7]
wire [4:0] io_lsu_ll_resp_bits_uop_fcn_op_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_fp_val_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_ll_resp_bits_uop_fp_rm_0; // @[dcache.scala:438:7]
wire [1:0] io_lsu_ll_resp_bits_uop_fp_typ_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_xcpt_pf_if_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_xcpt_ae_if_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_xcpt_ma_if_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_bp_debug_if_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_uop_bp_xcpt_if_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_ll_resp_bits_uop_debug_fsrc_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_ll_resp_bits_uop_debug_tsrc_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_ll_resp_bits_data_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_bits_is_hella_0; // @[dcache.scala:438:7]
wire io_lsu_ll_resp_valid_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_release_bits_opcode_0; // @[dcache.scala:438:7]
wire [2:0] io_lsu_release_bits_param_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_release_bits_size_0; // @[dcache.scala:438:7]
wire [3:0] io_lsu_release_bits_source_0; // @[dcache.scala:438:7]
wire [31:0] io_lsu_release_bits_address_0; // @[dcache.scala:438:7]
wire [63:0] io_lsu_release_bits_data_0; // @[dcache.scala:438:7]
wire io_lsu_release_valid_0; // @[dcache.scala:438:7]
wire io_lsu_perf_acquire_0; // @[dcache.scala:438:7]
wire io_lsu_perf_release_0; // @[dcache.scala:438:7]
wire io_lsu_ordered_0; // @[dcache.scala:438:7]
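  // The assigns below forward the diplomatic TileLink master port (nodeOut_*) onto the
  // auto_out_* wires: A-channel request fields, B-channel ready, C-channel release fields
  // (muxed through an arbiter, hence the _nodeOut_c_bits_WIRE_* intermediates), D-channel
  // ready, and the E-channel grant-ack (per the MixedNode.scala:542:17 annotations).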
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[MixedNode.scala:542:17]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire _nodeOut_b_ready_T_1; // @[dcache.scala:822:48]
assign auto_out_b_ready_0 = nodeOut_b_ready; // @[MixedNode.scala:542:17]
wire _nodeOut_c_valid_T_4; // @[Arbiter.scala:96:24]
assign auto_out_c_valid_0 = nodeOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] _nodeOut_c_bits_WIRE_opcode; // @[Mux.scala:30:73]
assign auto_out_c_bits_opcode_0 = nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] _nodeOut_c_bits_WIRE_param; // @[Mux.scala:30:73]
assign auto_out_c_bits_param_0 = nodeOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] _nodeOut_c_bits_WIRE_size; // @[Mux.scala:30:73]
assign auto_out_c_bits_size_0 = nodeOut_c_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] _nodeOut_c_bits_WIRE_source; // @[Mux.scala:30:73]
assign auto_out_c_bits_source_0 = nodeOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] _nodeOut_c_bits_WIRE_address; // @[Mux.scala:30:73]
assign auto_out_c_bits_address_0 = nodeOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] _nodeOut_c_bits_WIRE_data; // @[Mux.scala:30:73]
assign auto_out_c_bits_data_0 = nodeOut_c_bits_data; // @[MixedNode.scala:542:17]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[MixedNode.scala:542:17]
assign auto_out_e_valid_0 = nodeOut_e_valid; // @[MixedNode.scala:542:17]
assign auto_out_e_bits_sink_0 = nodeOut_e_bits_sink; // @[MixedNode.scala:542:17]
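  // Metadata/data array arbiter handshakes: a meta write or data read fires only when the
  // corresponding arbiter output is both ready and valid (standard Decoupled fire condition),
  // and block_incoming_reqs gates the LSU request port on top of that.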
wire _meta_0_io_write_valid_T = _meta_0_io_write_ready & _metaWriteArb_io_out_valid; // @[Decoupled.scala:51:35]
wire _data_io_read_0_valid_T = _dataReadArb_io_out_bits_valid_0 & _dataReadArb_io_out_valid; // @[dcache.scala:490:27, :495:63]
wire block_incoming_reqs; // @[dcache.scala:510:47]
wire _io_lsu_req_ready_T = _metaReadArb_io_in_4_ready & _dataReadArb_io_in_2_ready; // @[dcache.scala:472:27, :490:27, :511:50]
wire _io_lsu_req_ready_T_1 = ~block_incoming_reqs; // @[dcache.scala:510:47, :511:83]
assign _io_lsu_req_ready_T_2 = _io_lsu_req_ready_T & _io_lsu_req_ready_T_1; // @[dcache.scala:511:{50,80,83}]
assign io_lsu_req_ready_0 = _io_lsu_req_ready_T_2; // @[dcache.scala:438:7, :511:80]
wire _metaReadArb_io_in_4_valid_T = ~block_incoming_reqs; // @[dcache.scala:510:47, :511:83, :512:53]
wire _metaReadArb_io_in_4_valid_T_1 = io_lsu_req_valid_0 & _metaReadArb_io_in_4_valid_T; // @[dcache.scala:438:7, :512:{50,53}]
wire _dataReadArb_io_in_2_valid_T = ~block_incoming_reqs; // @[dcache.scala:510:47, :511:83, :513:53]
wire _dataReadArb_io_in_2_valid_T_1 = io_lsu_req_valid_0 & _dataReadArb_io_in_2_valid_T; // @[dcache.scala:438:7, :513:{50,53}]
wire [27:0] _metaReadArb_io_in_4_bits_req_0_idx_T = io_lsu_req_bits_0_bits_addr_0[33:6]; // @[dcache.scala:438:7, :516:77]
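  // Flattened fields of the MSHR replay request (replay_req, dcache.scala:527). The micro-op
  // bundle is fully unpacked into individual wires by the FIRRTL lowering pass, so every uop
  // field appears as its own declaration here.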
wire replay_req_0_uop_iq_type_0; // @[dcache.scala:527:24]
wire replay_req_0_uop_iq_type_1; // @[dcache.scala:527:24]
wire replay_req_0_uop_iq_type_2; // @[dcache.scala:527:24]
wire replay_req_0_uop_iq_type_3; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_0; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_1; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_2; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_3; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_4; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_5; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_6; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_7; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_8; // @[dcache.scala:527:24]
wire replay_req_0_uop_fu_code_9; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_ldst; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_wen; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_ren1; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_ren2; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_ren3; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_swap12; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_swap23; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_fp_ctrl_typeTagIn; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_fp_ctrl_typeTagOut; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_fromint; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_toint; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_fastpipe; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_fma; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_div; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_sqrt; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_wflags; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_ctrl_vec; // @[dcache.scala:527:24]
wire [31:0] replay_req_0_uop_inst; // @[dcache.scala:527:24]
wire [31:0] replay_req_0_uop_debug_inst; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_rvc; // @[dcache.scala:527:24]
wire [33:0] replay_req_0_uop_debug_pc; // @[dcache.scala:527:24]
wire replay_req_0_uop_iw_issued; // @[dcache.scala:527:24]
wire replay_req_0_uop_iw_issued_partial_agen; // @[dcache.scala:527:24]
wire replay_req_0_uop_iw_issued_partial_dgen; // @[dcache.scala:527:24]
wire replay_req_0_uop_iw_p1_speculative_child; // @[dcache.scala:527:24]
wire replay_req_0_uop_iw_p2_speculative_child; // @[dcache.scala:527:24]
wire replay_req_0_uop_iw_p1_bypass_hint; // @[dcache.scala:527:24]
wire replay_req_0_uop_iw_p2_bypass_hint; // @[dcache.scala:527:24]
wire replay_req_0_uop_iw_p3_bypass_hint; // @[dcache.scala:527:24]
wire replay_req_0_uop_dis_col_sel; // @[dcache.scala:527:24]
wire [3:0] replay_req_0_uop_br_mask; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_br_tag; // @[dcache.scala:527:24]
wire [3:0] replay_req_0_uop_br_type; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_sfb; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_fence; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_fencei; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_sfence; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_amo; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_eret; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_sys_pc2epc; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_rocc; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_mov; // @[dcache.scala:527:24]
wire [3:0] replay_req_0_uop_ftq_idx; // @[dcache.scala:527:24]
wire replay_req_0_uop_edge_inst; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_pc_lob; // @[dcache.scala:527:24]
wire replay_req_0_uop_taken; // @[dcache.scala:527:24]
wire replay_req_0_uop_imm_rename; // @[dcache.scala:527:24]
wire [2:0] replay_req_0_uop_imm_sel; // @[dcache.scala:527:24]
wire [4:0] replay_req_0_uop_pimm; // @[dcache.scala:527:24]
wire [19:0] replay_req_0_uop_imm_packed; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_op1_sel; // @[dcache.scala:527:24]
wire [2:0] replay_req_0_uop_op2_sel; // @[dcache.scala:527:24]
wire [4:0] replay_req_0_uop_rob_idx; // @[dcache.scala:527:24]
wire [3:0] replay_req_0_uop_ldq_idx; // @[dcache.scala:527:24]
wire [3:0] replay_req_0_uop_stq_idx; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_rxq_idx; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_pdst; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_prs1; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_prs2; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_prs3; // @[dcache.scala:527:24]
wire [3:0] replay_req_0_uop_ppred; // @[dcache.scala:527:24]
wire replay_req_0_uop_prs1_busy; // @[dcache.scala:527:24]
wire replay_req_0_uop_prs2_busy; // @[dcache.scala:527:24]
wire replay_req_0_uop_prs3_busy; // @[dcache.scala:527:24]
wire replay_req_0_uop_ppred_busy; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_stale_pdst; // @[dcache.scala:527:24]
wire replay_req_0_uop_exception; // @[dcache.scala:527:24]
wire [63:0] replay_req_0_uop_exc_cause; // @[dcache.scala:527:24]
wire [4:0] replay_req_0_uop_mem_cmd; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_mem_size; // @[dcache.scala:527:24]
wire replay_req_0_uop_mem_signed; // @[dcache.scala:527:24]
wire replay_req_0_uop_uses_ldq; // @[dcache.scala:527:24]
wire replay_req_0_uop_uses_stq; // @[dcache.scala:527:24]
wire replay_req_0_uop_is_unique; // @[dcache.scala:527:24]
wire replay_req_0_uop_flush_on_commit; // @[dcache.scala:527:24]
wire [2:0] replay_req_0_uop_csr_cmd; // @[dcache.scala:527:24]
wire replay_req_0_uop_ldst_is_rs1; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_ldst; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_lrs1; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_lrs2; // @[dcache.scala:527:24]
wire [5:0] replay_req_0_uop_lrs3; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_dst_rtype; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_lrs1_rtype; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_lrs2_rtype; // @[dcache.scala:527:24]
wire replay_req_0_uop_frs3_en; // @[dcache.scala:527:24]
wire replay_req_0_uop_fcn_dw; // @[dcache.scala:527:24]
wire [4:0] replay_req_0_uop_fcn_op; // @[dcache.scala:527:24]
wire replay_req_0_uop_fp_val; // @[dcache.scala:527:24]
wire [2:0] replay_req_0_uop_fp_rm; // @[dcache.scala:527:24]
wire [1:0] replay_req_0_uop_fp_typ; // @[dcache.scala:527:24]
wire replay_req_0_uop_xcpt_pf_if; // @[dcache.scala:527:24]
wire replay_req_0_uop_xcpt_ae_if; // @[dcache.scala:527:24]
wire replay_req_0_uop_xcpt_ma_if; // @[dcache.scala:527:24]
wire replay_req_0_uop_bp_debug_if; // @[dcache.scala:527:24]
wire replay_req_0_uop_bp_xcpt_if; // @[dcache.scala:527:24]
wire [2:0] replay_req_0_uop_debug_fsrc; // @[dcache.scala:527:24]
wire [2:0] replay_req_0_uop_debug_tsrc; // @[dcache.scala:527:24]
wire [33:0] replay_req_0_addr; // @[dcache.scala:527:24]
wire [63:0] replay_req_0_data; // @[dcache.scala:527:24]
wire replay_req_0_is_hella; // @[dcache.scala:527:24]
wire _mshrs_io_replay_ready_T_2 = _mshrs_io_replay_ready_T; // @[dcache.scala:534:{58,88}]
wire [27:0] _metaReadArb_io_in_0_bits_req_0_idx_T = _mshrs_io_replay_bits_addr[33:6]; // @[dcache.scala:460:21, :538:72]
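  // MSHR meta-read request address: the block address is rebuilt by concatenating the tag and
  // index returned by the MSHRs and padding with a zero 6-bit block offset before widening to
  // the 34-bit address used elsewhere in this module.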
wire [33:0] mshr_read_req_0_addr; // @[dcache.scala:549:27]
wire [25:0] _mshr_read_req_0_addr_T = {_mshrs_io_meta_read_bits_tag, _mshrs_io_meta_read_bits_idx}; // @[dcache.scala:460:21, :552:35]
wire [31:0] _mshr_read_req_0_addr_T_1 = {_mshr_read_req_0_addr_T, 6'h0}; // @[dcache.scala:552:{35,94}]
assign mshr_read_req_0_addr = {2'h0, _mshr_read_req_0_addr_T_1}; // @[dcache.scala:549:27, :552:{29,94}]
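  // Writeback unit path: wb_fire requires both the meta-read and data-read arbiters to accept
  // in the same cycle (the shared _GEN ready term below), and wb_req_0_addr concatenates the
  // writeback tag with the data request address.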
wire _wb_io_meta_read_ready_T_2; // @[dcache.scala:575:85]
wire _wb_fire_T = _wb_io_meta_read_ready_T_2 & _wb_io_meta_read_valid; // @[Decoupled.scala:51:35]
wire _wb_io_data_req_ready_T_2; // @[dcache.scala:580:85]
wire _wb_fire_T_1 = _wb_io_data_req_ready_T_2 & _wb_io_data_req_valid; // @[Decoupled.scala:51:35]
wire wb_fire = _wb_fire_T & _wb_fire_T_1; // @[Decoupled.scala:51:35]
wire [33:0] wb_req_0_addr; // @[dcache.scala:564:20]
wire [31:0] _wb_req_0_addr_T = {_wb_io_meta_read_bits_tag, _wb_io_data_req_bits_addr}; // @[dcache.scala:458:18, :567:28]
assign wb_req_0_addr = {2'h0, _wb_req_0_addr_T}; // @[dcache.scala:564:20, :567:{22,28}]
wire _GEN = _metaReadArb_io_in_2_ready & _dataReadArb_io_in_1_ready; // @[dcache.scala:472:27, :490:27, :575:55]
wire _wb_io_meta_read_ready_T; // @[dcache.scala:575:55]
assign _wb_io_meta_read_ready_T = _GEN; // @[dcache.scala:575:55]
wire _wb_io_data_req_ready_T; // @[dcache.scala:580:55]
assign _wb_io_data_req_ready_T = _GEN; // @[dcache.scala:575:55, :580:55]
assign _wb_io_meta_read_ready_T_2 = _wb_io_meta_read_ready_T; // @[dcache.scala:575:{55,85}]
assign _wb_io_data_req_ready_T_2 = _wb_io_data_req_ready_T; // @[dcache.scala:580:{55,85}]
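  // Prober path: prober_fire is the Decoupled fire of the prober's meta-read port, and
  // prober_req_0_addr is rebuilt from the probed tag and index with a zero block offset.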
wire prober_fire = _metaReadArb_io_in_1_ready & _prober_io_meta_read_valid; // @[Decoupled.scala:51:35]
wire [33:0] prober_req_0_addr; // @[dcache.scala:586:26]
wire [25:0] _prober_req_0_addr_T = {_prober_io_meta_read_bits_tag, _prober_io_meta_read_bits_idx}; // @[dcache.scala:459:22, :589:32]
wire [31:0] _prober_req_0_addr_T_1 = {_prober_req_0_addr_T, 6'h0}; // @[dcache.scala:589:{32,93}]
assign prober_req_0_addr = {2'h0, _prober_req_0_addr_T_1}; // @[dcache.scala:586:26, :589:{26,93}]
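  // Stage-0 (s0) request selection (dcache.scala:612-619 annotations). _T_7 is the fire of the
  // incoming LSU request; when it fires, s0_valid tracks the incoming request, otherwise it is
  // the OR of the MSHR replay, writeback, prober, and MSHR meta-read fires computed below.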
wire _T_7 = io_lsu_req_ready_0 & io_lsu_req_valid_0; // @[Decoupled.scala:51:35]
wire _s0_valid_T; // @[Decoupled.scala:51:35]
assign _s0_valid_T = _T_7; // @[Decoupled.scala:51:35]
wire _s0_req_T; // @[Decoupled.scala:51:35]
assign _s0_req_T = _T_7; // @[Decoupled.scala:51:35]
wire _s0_type_T; // @[Decoupled.scala:51:35]
assign _s0_type_T = _T_7; // @[Decoupled.scala:51:35]
wire _s0_send_resp_or_nack_T; // @[Decoupled.scala:51:35]
assign _s0_send_resp_or_nack_T = _T_7; // @[Decoupled.scala:51:35]
wire _s1_valid_T_8; // @[Decoupled.scala:51:35]
assign _s1_valid_T_8 = _T_7; // @[Decoupled.scala:51:35]
wire _GEN_0 = _mshrs_io_replay_ready_T_2 & _mshrs_io_replay_valid; // @[Decoupled.scala:51:35]
wire _s0_valid_T_1; // @[Decoupled.scala:51:35]
assign _s0_valid_T_1 = _GEN_0; // @[Decoupled.scala:51:35]
wire _s0_send_resp_or_nack_T_1; // @[Decoupled.scala:51:35]
assign _s0_send_resp_or_nack_T_1 = _GEN_0; // @[Decoupled.scala:51:35]
wire _s0_valid_T_2 = _s0_valid_T_1 | wb_fire; // @[Decoupled.scala:51:35]
wire _s0_valid_T_3 = _s0_valid_T_2 | prober_fire; // @[Decoupled.scala:51:35]
wire _s0_valid_T_4 = _s0_valid_T_3; // @[dcache.scala:613:{54,69}]
wire _GEN_1 = _metaReadArb_io_in_3_ready & _mshrs_io_meta_read_valid; // @[Decoupled.scala:51:35]
wire _s0_valid_T_5; // @[Decoupled.scala:51:35]
assign _s0_valid_T_5 = _GEN_1; // @[Decoupled.scala:51:35]
wire _s0_req_T_1; // @[Decoupled.scala:51:35]
assign _s0_req_T_1 = _GEN_1; // @[Decoupled.scala:51:35]
wire _s0_type_T_1; // @[Decoupled.scala:51:35]
assign _s0_type_T_1 = _GEN_1; // @[Decoupled.scala:51:35]
wire _s0_valid_T_6 = _s0_valid_T_4 | _s0_valid_T_5; // @[Decoupled.scala:51:35]
wire _s0_valid_T_7_0 = _s0_valid_T_6; // @[dcache.scala:613:{21,86}]
wire s0_valid_0 = _s0_valid_T ? _s0_valid_WIRE_0 : _s0_valid_T_7_0; // @[Decoupled.scala:51:35]
wire _s1_valid_T_4 = s0_valid_0; // @[dcache.scala:612:21, :638:74]
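  // Per-field mux chain building the s0 request: when the MSHR meta-read wins (_s0_req_T_1),
  // the uop fields are zeroed and only the address (mshr_read_req_0_addr) is meaningful;
  // otherwise the MSHR replay request is passed through. Because the bundle is flattened,
  // every field gets its own 2:1 mux.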
wire [31:0] _s0_req_T_2_0_uop_inst = _s0_req_T_1 ? 32'h0 : replay_req_0_uop_inst; // @[Decoupled.scala:51:35]
wire [31:0] _s0_req_T_2_0_uop_debug_inst = _s0_req_T_1 ? 32'h0 : replay_req_0_uop_debug_inst; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_rvc = ~_s0_req_T_1 & replay_req_0_uop_is_rvc; // @[Decoupled.scala:51:35]
wire [33:0] _s0_req_T_2_0_uop_debug_pc = _s0_req_T_1 ? 34'h0 : replay_req_0_uop_debug_pc; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iq_type_0 = ~_s0_req_T_1 & replay_req_0_uop_iq_type_0; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iq_type_1 = ~_s0_req_T_1 & replay_req_0_uop_iq_type_1; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iq_type_2 = ~_s0_req_T_1 & replay_req_0_uop_iq_type_2; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iq_type_3 = ~_s0_req_T_1 & replay_req_0_uop_iq_type_3; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_0 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_0; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_1 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_1; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_2 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_2; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_3 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_3; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_4 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_4; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_5 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_5; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_6 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_6; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_7 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_7; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_8 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_8; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fu_code_9 = ~_s0_req_T_1 & replay_req_0_uop_fu_code_9; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iw_issued = ~_s0_req_T_1 & replay_req_0_uop_iw_issued; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iw_issued_partial_agen = ~_s0_req_T_1 & replay_req_0_uop_iw_issued_partial_agen; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iw_issued_partial_dgen = ~_s0_req_T_1 & replay_req_0_uop_iw_issued_partial_dgen; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iw_p1_speculative_child = ~_s0_req_T_1 & replay_req_0_uop_iw_p1_speculative_child; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iw_p2_speculative_child = ~_s0_req_T_1 & replay_req_0_uop_iw_p2_speculative_child; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iw_p1_bypass_hint = ~_s0_req_T_1 & replay_req_0_uop_iw_p1_bypass_hint; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iw_p2_bypass_hint = ~_s0_req_T_1 & replay_req_0_uop_iw_p2_bypass_hint; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_iw_p3_bypass_hint = ~_s0_req_T_1 & replay_req_0_uop_iw_p3_bypass_hint; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_dis_col_sel = ~_s0_req_T_1 & replay_req_0_uop_dis_col_sel; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_2_0_uop_br_mask = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_br_mask; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_br_tag = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_br_tag; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_2_0_uop_br_type = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_br_type; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_sfb = ~_s0_req_T_1 & replay_req_0_uop_is_sfb; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_fence = ~_s0_req_T_1 & replay_req_0_uop_is_fence; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_fencei = ~_s0_req_T_1 & replay_req_0_uop_is_fencei; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_sfence = ~_s0_req_T_1 & replay_req_0_uop_is_sfence; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_amo = ~_s0_req_T_1 & replay_req_0_uop_is_amo; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_eret = ~_s0_req_T_1 & replay_req_0_uop_is_eret; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_sys_pc2epc = ~_s0_req_T_1 & replay_req_0_uop_is_sys_pc2epc; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_rocc = ~_s0_req_T_1 & replay_req_0_uop_is_rocc; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_mov = ~_s0_req_T_1 & replay_req_0_uop_is_mov; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_2_0_uop_ftq_idx = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_ftq_idx; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_edge_inst = ~_s0_req_T_1 & replay_req_0_uop_edge_inst; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_pc_lob = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_pc_lob; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_taken = ~_s0_req_T_1 & replay_req_0_uop_taken; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_imm_rename = ~_s0_req_T_1 & replay_req_0_uop_imm_rename; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_2_0_uop_imm_sel = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_imm_sel; // @[Decoupled.scala:51:35]
wire [4:0] _s0_req_T_2_0_uop_pimm = _s0_req_T_1 ? 5'h0 : replay_req_0_uop_pimm; // @[Decoupled.scala:51:35]
wire [19:0] _s0_req_T_2_0_uop_imm_packed = _s0_req_T_1 ? 20'h0 : replay_req_0_uop_imm_packed; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_op1_sel = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_op1_sel; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_2_0_uop_op2_sel = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_op2_sel; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_ldst = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_ldst; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_wen = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_wen; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_ren1 = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_ren1; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_ren2 = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_ren2; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_ren3 = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_ren3; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_swap12 = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_swap12; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_swap23 = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_swap23; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_fp_ctrl_typeTagIn = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_fp_ctrl_typeTagIn; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_fp_ctrl_typeTagOut = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_fp_ctrl_typeTagOut; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_fromint = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_fromint; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_toint = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_toint; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_fastpipe = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_fastpipe; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_fma = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_fma; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_div = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_div; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_sqrt = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_sqrt; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_wflags = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_wflags; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_ctrl_vec = ~_s0_req_T_1 & replay_req_0_uop_fp_ctrl_vec; // @[Decoupled.scala:51:35]
wire [4:0] _s0_req_T_2_0_uop_rob_idx = _s0_req_T_1 ? 5'h0 : replay_req_0_uop_rob_idx; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_2_0_uop_ldq_idx = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_ldq_idx; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_2_0_uop_stq_idx = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_stq_idx; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_rxq_idx = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_rxq_idx; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_pdst = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_pdst; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_prs1 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_prs1; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_prs2 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_prs2; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_prs3 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_prs3; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_2_0_uop_ppred = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_ppred; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_prs1_busy = ~_s0_req_T_1 & replay_req_0_uop_prs1_busy; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_prs2_busy = ~_s0_req_T_1 & replay_req_0_uop_prs2_busy; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_prs3_busy = ~_s0_req_T_1 & replay_req_0_uop_prs3_busy; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_ppred_busy = ~_s0_req_T_1 & replay_req_0_uop_ppred_busy; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_stale_pdst = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_stale_pdst; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_exception = ~_s0_req_T_1 & replay_req_0_uop_exception; // @[Decoupled.scala:51:35]
wire [63:0] _s0_req_T_2_0_uop_exc_cause = _s0_req_T_1 ? 64'h0 : replay_req_0_uop_exc_cause; // @[Decoupled.scala:51:35]
wire [4:0] _s0_req_T_2_0_uop_mem_cmd = _s0_req_T_1 ? 5'h0 : replay_req_0_uop_mem_cmd; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_mem_size = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_mem_size; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_mem_signed = ~_s0_req_T_1 & replay_req_0_uop_mem_signed; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_uses_ldq = ~_s0_req_T_1 & replay_req_0_uop_uses_ldq; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_uses_stq = ~_s0_req_T_1 & replay_req_0_uop_uses_stq; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_is_unique = ~_s0_req_T_1 & replay_req_0_uop_is_unique; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_flush_on_commit = ~_s0_req_T_1 & replay_req_0_uop_flush_on_commit; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_2_0_uop_csr_cmd = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_csr_cmd; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_ldst_is_rs1 = ~_s0_req_T_1 & replay_req_0_uop_ldst_is_rs1; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_ldst = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_ldst; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_lrs1 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_lrs1; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_lrs2 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_lrs2; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_2_0_uop_lrs3 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_lrs3; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_dst_rtype = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_dst_rtype; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_lrs1_rtype = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_lrs1_rtype; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_lrs2_rtype = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_lrs2_rtype; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_frs3_en = ~_s0_req_T_1 & replay_req_0_uop_frs3_en; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fcn_dw = ~_s0_req_T_1 & replay_req_0_uop_fcn_dw; // @[Decoupled.scala:51:35]
wire [4:0] _s0_req_T_2_0_uop_fcn_op = _s0_req_T_1 ? 5'h0 : replay_req_0_uop_fcn_op; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_fp_val = ~_s0_req_T_1 & replay_req_0_uop_fp_val; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_2_0_uop_fp_rm = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_fp_rm; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_2_0_uop_fp_typ = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_fp_typ; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_xcpt_pf_if = ~_s0_req_T_1 & replay_req_0_uop_xcpt_pf_if; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_xcpt_ae_if = ~_s0_req_T_1 & replay_req_0_uop_xcpt_ae_if; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_xcpt_ma_if = ~_s0_req_T_1 & replay_req_0_uop_xcpt_ma_if; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_bp_debug_if = ~_s0_req_T_1 & replay_req_0_uop_bp_debug_if; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_uop_bp_xcpt_if = ~_s0_req_T_1 & replay_req_0_uop_bp_xcpt_if; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_2_0_uop_debug_fsrc = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_debug_fsrc; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_2_0_uop_debug_tsrc = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_debug_tsrc; // @[Decoupled.scala:51:35]
wire [33:0] _s0_req_T_2_0_addr = _s0_req_T_1 ? mshr_read_req_0_addr : replay_req_0_addr; // @[Decoupled.scala:51:35]
wire [63:0] _s0_req_T_2_0_data = _s0_req_T_1 ? 64'h0 : replay_req_0_data; // @[Decoupled.scala:51:35]
wire _s0_req_T_2_0_is_hella = ~_s0_req_T_1 & replay_req_0_is_hella; // @[Decoupled.scala:51:35]
wire [31:0] _s0_req_T_3_0_uop_inst = _s0_req_T_2_0_uop_inst; // @[dcache.scala:618:21, :619:21]
wire [31:0] _s0_req_T_3_0_uop_debug_inst = _s0_req_T_2_0_uop_debug_inst; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_rvc = _s0_req_T_2_0_uop_is_rvc; // @[dcache.scala:618:21, :619:21]
wire [33:0] _s0_req_T_3_0_uop_debug_pc = _s0_req_T_2_0_uop_debug_pc; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iq_type_0 = _s0_req_T_2_0_uop_iq_type_0; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iq_type_1 = _s0_req_T_2_0_uop_iq_type_1; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iq_type_2 = _s0_req_T_2_0_uop_iq_type_2; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iq_type_3 = _s0_req_T_2_0_uop_iq_type_3; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_0 = _s0_req_T_2_0_uop_fu_code_0; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_1 = _s0_req_T_2_0_uop_fu_code_1; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_2 = _s0_req_T_2_0_uop_fu_code_2; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_3 = _s0_req_T_2_0_uop_fu_code_3; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_4 = _s0_req_T_2_0_uop_fu_code_4; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_5 = _s0_req_T_2_0_uop_fu_code_5; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_6 = _s0_req_T_2_0_uop_fu_code_6; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_7 = _s0_req_T_2_0_uop_fu_code_7; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_8 = _s0_req_T_2_0_uop_fu_code_8; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fu_code_9 = _s0_req_T_2_0_uop_fu_code_9; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iw_issued = _s0_req_T_2_0_uop_iw_issued; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iw_issued_partial_agen = _s0_req_T_2_0_uop_iw_issued_partial_agen; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iw_issued_partial_dgen = _s0_req_T_2_0_uop_iw_issued_partial_dgen; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iw_p1_speculative_child = _s0_req_T_2_0_uop_iw_p1_speculative_child; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iw_p2_speculative_child = _s0_req_T_2_0_uop_iw_p2_speculative_child; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iw_p1_bypass_hint = _s0_req_T_2_0_uop_iw_p1_bypass_hint; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iw_p2_bypass_hint = _s0_req_T_2_0_uop_iw_p2_bypass_hint; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_iw_p3_bypass_hint = _s0_req_T_2_0_uop_iw_p3_bypass_hint; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_dis_col_sel = _s0_req_T_2_0_uop_dis_col_sel; // @[dcache.scala:618:21, :619:21]
wire [3:0] _s0_req_T_3_0_uop_br_mask = _s0_req_T_2_0_uop_br_mask; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_br_tag = _s0_req_T_2_0_uop_br_tag; // @[dcache.scala:618:21, :619:21]
wire [3:0] _s0_req_T_3_0_uop_br_type = _s0_req_T_2_0_uop_br_type; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_sfb = _s0_req_T_2_0_uop_is_sfb; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_fence = _s0_req_T_2_0_uop_is_fence; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_fencei = _s0_req_T_2_0_uop_is_fencei; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_sfence = _s0_req_T_2_0_uop_is_sfence; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_amo = _s0_req_T_2_0_uop_is_amo; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_eret = _s0_req_T_2_0_uop_is_eret; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_sys_pc2epc = _s0_req_T_2_0_uop_is_sys_pc2epc; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_rocc = _s0_req_T_2_0_uop_is_rocc; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_mov = _s0_req_T_2_0_uop_is_mov; // @[dcache.scala:618:21, :619:21]
wire [3:0] _s0_req_T_3_0_uop_ftq_idx = _s0_req_T_2_0_uop_ftq_idx; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_edge_inst = _s0_req_T_2_0_uop_edge_inst; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_pc_lob = _s0_req_T_2_0_uop_pc_lob; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_taken = _s0_req_T_2_0_uop_taken; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_imm_rename = _s0_req_T_2_0_uop_imm_rename; // @[dcache.scala:618:21, :619:21]
wire [2:0] _s0_req_T_3_0_uop_imm_sel = _s0_req_T_2_0_uop_imm_sel; // @[dcache.scala:618:21, :619:21]
wire [4:0] _s0_req_T_3_0_uop_pimm = _s0_req_T_2_0_uop_pimm; // @[dcache.scala:618:21, :619:21]
wire [19:0] _s0_req_T_3_0_uop_imm_packed = _s0_req_T_2_0_uop_imm_packed; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_op1_sel = _s0_req_T_2_0_uop_op1_sel; // @[dcache.scala:618:21, :619:21]
wire [2:0] _s0_req_T_3_0_uop_op2_sel = _s0_req_T_2_0_uop_op2_sel; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_ldst = _s0_req_T_2_0_uop_fp_ctrl_ldst; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_wen = _s0_req_T_2_0_uop_fp_ctrl_wen; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_ren1 = _s0_req_T_2_0_uop_fp_ctrl_ren1; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_ren2 = _s0_req_T_2_0_uop_fp_ctrl_ren2; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_ren3 = _s0_req_T_2_0_uop_fp_ctrl_ren3; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_swap12 = _s0_req_T_2_0_uop_fp_ctrl_swap12; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_swap23 = _s0_req_T_2_0_uop_fp_ctrl_swap23; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_fp_ctrl_typeTagIn = _s0_req_T_2_0_uop_fp_ctrl_typeTagIn; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_fp_ctrl_typeTagOut = _s0_req_T_2_0_uop_fp_ctrl_typeTagOut; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_fromint = _s0_req_T_2_0_uop_fp_ctrl_fromint; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_toint = _s0_req_T_2_0_uop_fp_ctrl_toint; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_fastpipe = _s0_req_T_2_0_uop_fp_ctrl_fastpipe; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_fma = _s0_req_T_2_0_uop_fp_ctrl_fma; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_div = _s0_req_T_2_0_uop_fp_ctrl_div; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_sqrt = _s0_req_T_2_0_uop_fp_ctrl_sqrt; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_wflags = _s0_req_T_2_0_uop_fp_ctrl_wflags; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_ctrl_vec = _s0_req_T_2_0_uop_fp_ctrl_vec; // @[dcache.scala:618:21, :619:21]
wire [4:0] _s0_req_T_3_0_uop_rob_idx = _s0_req_T_2_0_uop_rob_idx; // @[dcache.scala:618:21, :619:21]
wire [3:0] _s0_req_T_3_0_uop_ldq_idx = _s0_req_T_2_0_uop_ldq_idx; // @[dcache.scala:618:21, :619:21]
wire [3:0] _s0_req_T_3_0_uop_stq_idx = _s0_req_T_2_0_uop_stq_idx; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_rxq_idx = _s0_req_T_2_0_uop_rxq_idx; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_pdst = _s0_req_T_2_0_uop_pdst; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_prs1 = _s0_req_T_2_0_uop_prs1; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_prs2 = _s0_req_T_2_0_uop_prs2; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_prs3 = _s0_req_T_2_0_uop_prs3; // @[dcache.scala:618:21, :619:21]
wire [3:0] _s0_req_T_3_0_uop_ppred = _s0_req_T_2_0_uop_ppred; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_prs1_busy = _s0_req_T_2_0_uop_prs1_busy; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_prs2_busy = _s0_req_T_2_0_uop_prs2_busy; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_prs3_busy = _s0_req_T_2_0_uop_prs3_busy; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_ppred_busy = _s0_req_T_2_0_uop_ppred_busy; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_stale_pdst = _s0_req_T_2_0_uop_stale_pdst; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_exception = _s0_req_T_2_0_uop_exception; // @[dcache.scala:618:21, :619:21]
wire [63:0] _s0_req_T_3_0_uop_exc_cause = _s0_req_T_2_0_uop_exc_cause; // @[dcache.scala:618:21, :619:21]
wire [4:0] _s0_req_T_3_0_uop_mem_cmd = _s0_req_T_2_0_uop_mem_cmd; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_mem_size = _s0_req_T_2_0_uop_mem_size; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_mem_signed = _s0_req_T_2_0_uop_mem_signed; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_uses_ldq = _s0_req_T_2_0_uop_uses_ldq; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_uses_stq = _s0_req_T_2_0_uop_uses_stq; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_is_unique = _s0_req_T_2_0_uop_is_unique; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_flush_on_commit = _s0_req_T_2_0_uop_flush_on_commit; // @[dcache.scala:618:21, :619:21]
wire [2:0] _s0_req_T_3_0_uop_csr_cmd = _s0_req_T_2_0_uop_csr_cmd; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_ldst_is_rs1 = _s0_req_T_2_0_uop_ldst_is_rs1; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_ldst = _s0_req_T_2_0_uop_ldst; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_lrs1 = _s0_req_T_2_0_uop_lrs1; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_lrs2 = _s0_req_T_2_0_uop_lrs2; // @[dcache.scala:618:21, :619:21]
wire [5:0] _s0_req_T_3_0_uop_lrs3 = _s0_req_T_2_0_uop_lrs3; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_dst_rtype = _s0_req_T_2_0_uop_dst_rtype; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_lrs1_rtype = _s0_req_T_2_0_uop_lrs1_rtype; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_lrs2_rtype = _s0_req_T_2_0_uop_lrs2_rtype; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_frs3_en = _s0_req_T_2_0_uop_frs3_en; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fcn_dw = _s0_req_T_2_0_uop_fcn_dw; // @[dcache.scala:618:21, :619:21]
wire [4:0] _s0_req_T_3_0_uop_fcn_op = _s0_req_T_2_0_uop_fcn_op; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_fp_val = _s0_req_T_2_0_uop_fp_val; // @[dcache.scala:618:21, :619:21]
wire [2:0] _s0_req_T_3_0_uop_fp_rm = _s0_req_T_2_0_uop_fp_rm; // @[dcache.scala:618:21, :619:21]
wire [1:0] _s0_req_T_3_0_uop_fp_typ = _s0_req_T_2_0_uop_fp_typ; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_xcpt_pf_if = _s0_req_T_2_0_uop_xcpt_pf_if; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_xcpt_ae_if = _s0_req_T_2_0_uop_xcpt_ae_if; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_xcpt_ma_if = _s0_req_T_2_0_uop_xcpt_ma_if; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_bp_debug_if = _s0_req_T_2_0_uop_bp_debug_if; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_uop_bp_xcpt_if = _s0_req_T_2_0_uop_bp_xcpt_if; // @[dcache.scala:618:21, :619:21]
wire [2:0] _s0_req_T_3_0_uop_debug_fsrc = _s0_req_T_2_0_uop_debug_fsrc; // @[dcache.scala:618:21, :619:21]
wire [2:0] _s0_req_T_3_0_uop_debug_tsrc = _s0_req_T_2_0_uop_debug_tsrc; // @[dcache.scala:618:21, :619:21]
wire [33:0] _s0_req_T_3_0_addr = _s0_req_T_2_0_addr; // @[dcache.scala:618:21, :619:21]
wire [63:0] _s0_req_T_3_0_data = _s0_req_T_2_0_data; // @[dcache.scala:618:21, :619:21]
wire _s0_req_T_3_0_is_hella = _s0_req_T_2_0_is_hella; // @[dcache.scala:618:21, :619:21]
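  // Stage-0 request select, step 1: when the prober fires, take the probe address
  // (prober_req_0_addr) and zero the remaining uop/data fields; otherwise pass the
  // request selected above straight through.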
wire [31:0] _s0_req_T_4_0_uop_inst = prober_fire ? 32'h0 : _s0_req_T_3_0_uop_inst; // @[Decoupled.scala:51:35]
wire [31:0] _s0_req_T_4_0_uop_debug_inst = prober_fire ? 32'h0 : _s0_req_T_3_0_uop_debug_inst; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_rvc = ~prober_fire & _s0_req_T_3_0_uop_is_rvc; // @[Decoupled.scala:51:35]
wire [33:0] _s0_req_T_4_0_uop_debug_pc = prober_fire ? 34'h0 : _s0_req_T_3_0_uop_debug_pc; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iq_type_0 = ~prober_fire & _s0_req_T_3_0_uop_iq_type_0; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iq_type_1 = ~prober_fire & _s0_req_T_3_0_uop_iq_type_1; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iq_type_2 = ~prober_fire & _s0_req_T_3_0_uop_iq_type_2; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iq_type_3 = ~prober_fire & _s0_req_T_3_0_uop_iq_type_3; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_0 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_0; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_1 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_1; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_2 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_2; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_3 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_3; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_4 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_4; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_5 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_5; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_6 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_6; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_7 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_7; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_8 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_8; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fu_code_9 = ~prober_fire & _s0_req_T_3_0_uop_fu_code_9; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iw_issued = ~prober_fire & _s0_req_T_3_0_uop_iw_issued; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iw_issued_partial_agen = ~prober_fire & _s0_req_T_3_0_uop_iw_issued_partial_agen; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iw_issued_partial_dgen = ~prober_fire & _s0_req_T_3_0_uop_iw_issued_partial_dgen; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iw_p1_speculative_child = ~prober_fire & _s0_req_T_3_0_uop_iw_p1_speculative_child; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iw_p2_speculative_child = ~prober_fire & _s0_req_T_3_0_uop_iw_p2_speculative_child; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iw_p1_bypass_hint = ~prober_fire & _s0_req_T_3_0_uop_iw_p1_bypass_hint; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iw_p2_bypass_hint = ~prober_fire & _s0_req_T_3_0_uop_iw_p2_bypass_hint; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_iw_p3_bypass_hint = ~prober_fire & _s0_req_T_3_0_uop_iw_p3_bypass_hint; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_dis_col_sel = ~prober_fire & _s0_req_T_3_0_uop_dis_col_sel; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_4_0_uop_br_mask = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_br_mask; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_br_tag = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_br_tag; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_4_0_uop_br_type = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_br_type; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_sfb = ~prober_fire & _s0_req_T_3_0_uop_is_sfb; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_fence = ~prober_fire & _s0_req_T_3_0_uop_is_fence; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_fencei = ~prober_fire & _s0_req_T_3_0_uop_is_fencei; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_sfence = ~prober_fire & _s0_req_T_3_0_uop_is_sfence; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_amo = ~prober_fire & _s0_req_T_3_0_uop_is_amo; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_eret = ~prober_fire & _s0_req_T_3_0_uop_is_eret; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_sys_pc2epc = ~prober_fire & _s0_req_T_3_0_uop_is_sys_pc2epc; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_rocc = ~prober_fire & _s0_req_T_3_0_uop_is_rocc; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_mov = ~prober_fire & _s0_req_T_3_0_uop_is_mov; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_4_0_uop_ftq_idx = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_ftq_idx; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_edge_inst = ~prober_fire & _s0_req_T_3_0_uop_edge_inst; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_pc_lob = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_pc_lob; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_taken = ~prober_fire & _s0_req_T_3_0_uop_taken; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_imm_rename = ~prober_fire & _s0_req_T_3_0_uop_imm_rename; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_4_0_uop_imm_sel = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_imm_sel; // @[Decoupled.scala:51:35]
wire [4:0] _s0_req_T_4_0_uop_pimm = prober_fire ? 5'h0 : _s0_req_T_3_0_uop_pimm; // @[Decoupled.scala:51:35]
wire [19:0] _s0_req_T_4_0_uop_imm_packed = prober_fire ? 20'h0 : _s0_req_T_3_0_uop_imm_packed; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_op1_sel = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_op1_sel; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_4_0_uop_op2_sel = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_op2_sel; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_ldst = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_ldst; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_wen = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_wen; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_ren1 = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_ren1; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_ren2 = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_ren2; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_ren3 = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_ren3; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_swap12 = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_swap12; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_swap23 = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_swap23; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_fp_ctrl_typeTagIn = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_fp_ctrl_typeTagIn; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_fp_ctrl_typeTagOut = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_fp_ctrl_typeTagOut; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_fromint = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_fromint; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_toint = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_toint; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_fastpipe = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_fastpipe; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_fma = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_fma; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_div = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_div; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_sqrt = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_sqrt; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_wflags = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_wflags; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_ctrl_vec = ~prober_fire & _s0_req_T_3_0_uop_fp_ctrl_vec; // @[Decoupled.scala:51:35]
wire [4:0] _s0_req_T_4_0_uop_rob_idx = prober_fire ? 5'h0 : _s0_req_T_3_0_uop_rob_idx; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_4_0_uop_ldq_idx = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_ldq_idx; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_4_0_uop_stq_idx = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_stq_idx; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_rxq_idx = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_rxq_idx; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_pdst = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_pdst; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_prs1 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_prs1; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_prs2 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_prs2; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_prs3 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_prs3; // @[Decoupled.scala:51:35]
wire [3:0] _s0_req_T_4_0_uop_ppred = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_ppred; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_prs1_busy = ~prober_fire & _s0_req_T_3_0_uop_prs1_busy; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_prs2_busy = ~prober_fire & _s0_req_T_3_0_uop_prs2_busy; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_prs3_busy = ~prober_fire & _s0_req_T_3_0_uop_prs3_busy; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_ppred_busy = ~prober_fire & _s0_req_T_3_0_uop_ppred_busy; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_stale_pdst = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_stale_pdst; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_exception = ~prober_fire & _s0_req_T_3_0_uop_exception; // @[Decoupled.scala:51:35]
wire [63:0] _s0_req_T_4_0_uop_exc_cause = prober_fire ? 64'h0 : _s0_req_T_3_0_uop_exc_cause; // @[Decoupled.scala:51:35]
wire [4:0] _s0_req_T_4_0_uop_mem_cmd = prober_fire ? 5'h0 : _s0_req_T_3_0_uop_mem_cmd; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_mem_size = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_mem_size; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_mem_signed = ~prober_fire & _s0_req_T_3_0_uop_mem_signed; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_uses_ldq = ~prober_fire & _s0_req_T_3_0_uop_uses_ldq; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_uses_stq = ~prober_fire & _s0_req_T_3_0_uop_uses_stq; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_is_unique = ~prober_fire & _s0_req_T_3_0_uop_is_unique; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_flush_on_commit = ~prober_fire & _s0_req_T_3_0_uop_flush_on_commit; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_4_0_uop_csr_cmd = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_csr_cmd; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_ldst_is_rs1 = ~prober_fire & _s0_req_T_3_0_uop_ldst_is_rs1; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_ldst = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_ldst; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_lrs1 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_lrs1; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_lrs2 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_lrs2; // @[Decoupled.scala:51:35]
wire [5:0] _s0_req_T_4_0_uop_lrs3 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_lrs3; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_dst_rtype = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_dst_rtype; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_lrs1_rtype = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_lrs1_rtype; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_lrs2_rtype = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_lrs2_rtype; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_frs3_en = ~prober_fire & _s0_req_T_3_0_uop_frs3_en; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fcn_dw = ~prober_fire & _s0_req_T_3_0_uop_fcn_dw; // @[Decoupled.scala:51:35]
wire [4:0] _s0_req_T_4_0_uop_fcn_op = prober_fire ? 5'h0 : _s0_req_T_3_0_uop_fcn_op; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_fp_val = ~prober_fire & _s0_req_T_3_0_uop_fp_val; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_4_0_uop_fp_rm = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_fp_rm; // @[Decoupled.scala:51:35]
wire [1:0] _s0_req_T_4_0_uop_fp_typ = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_fp_typ; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_xcpt_pf_if = ~prober_fire & _s0_req_T_3_0_uop_xcpt_pf_if; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_xcpt_ae_if = ~prober_fire & _s0_req_T_3_0_uop_xcpt_ae_if; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_xcpt_ma_if = ~prober_fire & _s0_req_T_3_0_uop_xcpt_ma_if; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_bp_debug_if = ~prober_fire & _s0_req_T_3_0_uop_bp_debug_if; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_uop_bp_xcpt_if = ~prober_fire & _s0_req_T_3_0_uop_bp_xcpt_if; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_4_0_uop_debug_fsrc = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_debug_fsrc; // @[Decoupled.scala:51:35]
wire [2:0] _s0_req_T_4_0_uop_debug_tsrc = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_debug_tsrc; // @[Decoupled.scala:51:35]
wire [33:0] _s0_req_T_4_0_addr = prober_fire ? prober_req_0_addr : _s0_req_T_3_0_addr; // @[Decoupled.scala:51:35]
wire [63:0] _s0_req_T_4_0_data = prober_fire ? 64'h0 : _s0_req_T_3_0_data; // @[Decoupled.scala:51:35]
wire _s0_req_T_4_0_is_hella = ~prober_fire & _s0_req_T_3_0_is_hella; // @[Decoupled.scala:51:35]
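  // Stage-0 request select, step 2: a firing writeback request takes priority over the
  // prober result above, substituting wb_req_0_addr and zeroing the other fields.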
wire [31:0] _s0_req_T_5_0_uop_inst = wb_fire ? 32'h0 : _s0_req_T_4_0_uop_inst; // @[dcache.scala:563:38, :616:21, :617:21]
wire [31:0] _s0_req_T_5_0_uop_debug_inst = wb_fire ? 32'h0 : _s0_req_T_4_0_uop_debug_inst; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_rvc = ~wb_fire & _s0_req_T_4_0_uop_is_rvc; // @[dcache.scala:563:38, :616:21, :617:21]
wire [33:0] _s0_req_T_5_0_uop_debug_pc = wb_fire ? 34'h0 : _s0_req_T_4_0_uop_debug_pc; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iq_type_0 = ~wb_fire & _s0_req_T_4_0_uop_iq_type_0; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iq_type_1 = ~wb_fire & _s0_req_T_4_0_uop_iq_type_1; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iq_type_2 = ~wb_fire & _s0_req_T_4_0_uop_iq_type_2; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iq_type_3 = ~wb_fire & _s0_req_T_4_0_uop_iq_type_3; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_0 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_0; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_1 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_1; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_2 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_2; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_3 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_3; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_4 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_4; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_5 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_5; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_6 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_6; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_7 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_7; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_8 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_8; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fu_code_9 = ~wb_fire & _s0_req_T_4_0_uop_fu_code_9; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iw_issued = ~wb_fire & _s0_req_T_4_0_uop_iw_issued; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iw_issued_partial_agen = ~wb_fire & _s0_req_T_4_0_uop_iw_issued_partial_agen; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iw_issued_partial_dgen = ~wb_fire & _s0_req_T_4_0_uop_iw_issued_partial_dgen; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iw_p1_speculative_child = ~wb_fire & _s0_req_T_4_0_uop_iw_p1_speculative_child; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iw_p2_speculative_child = ~wb_fire & _s0_req_T_4_0_uop_iw_p2_speculative_child; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iw_p1_bypass_hint = ~wb_fire & _s0_req_T_4_0_uop_iw_p1_bypass_hint; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iw_p2_bypass_hint = ~wb_fire & _s0_req_T_4_0_uop_iw_p2_bypass_hint; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_iw_p3_bypass_hint = ~wb_fire & _s0_req_T_4_0_uop_iw_p3_bypass_hint; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_dis_col_sel = ~wb_fire & _s0_req_T_4_0_uop_dis_col_sel; // @[dcache.scala:563:38, :616:21, :617:21]
wire [3:0] _s0_req_T_5_0_uop_br_mask = wb_fire ? 4'h0 : _s0_req_T_4_0_uop_br_mask; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_br_tag = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_br_tag; // @[dcache.scala:563:38, :616:21, :617:21]
wire [3:0] _s0_req_T_5_0_uop_br_type = wb_fire ? 4'h0 : _s0_req_T_4_0_uop_br_type; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_sfb = ~wb_fire & _s0_req_T_4_0_uop_is_sfb; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_fence = ~wb_fire & _s0_req_T_4_0_uop_is_fence; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_fencei = ~wb_fire & _s0_req_T_4_0_uop_is_fencei; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_sfence = ~wb_fire & _s0_req_T_4_0_uop_is_sfence; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_amo = ~wb_fire & _s0_req_T_4_0_uop_is_amo; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_eret = ~wb_fire & _s0_req_T_4_0_uop_is_eret; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_sys_pc2epc = ~wb_fire & _s0_req_T_4_0_uop_is_sys_pc2epc; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_rocc = ~wb_fire & _s0_req_T_4_0_uop_is_rocc; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_mov = ~wb_fire & _s0_req_T_4_0_uop_is_mov; // @[dcache.scala:563:38, :616:21, :617:21]
wire [3:0] _s0_req_T_5_0_uop_ftq_idx = wb_fire ? 4'h0 : _s0_req_T_4_0_uop_ftq_idx; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_edge_inst = ~wb_fire & _s0_req_T_4_0_uop_edge_inst; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_pc_lob = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_pc_lob; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_taken = ~wb_fire & _s0_req_T_4_0_uop_taken; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_imm_rename = ~wb_fire & _s0_req_T_4_0_uop_imm_rename; // @[dcache.scala:563:38, :616:21, :617:21]
wire [2:0] _s0_req_T_5_0_uop_imm_sel = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_imm_sel; // @[dcache.scala:563:38, :616:21, :617:21]
wire [4:0] _s0_req_T_5_0_uop_pimm = wb_fire ? 5'h0 : _s0_req_T_4_0_uop_pimm; // @[dcache.scala:563:38, :616:21, :617:21]
wire [19:0] _s0_req_T_5_0_uop_imm_packed = wb_fire ? 20'h0 : _s0_req_T_4_0_uop_imm_packed; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_op1_sel = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_op1_sel; // @[dcache.scala:563:38, :616:21, :617:21]
wire [2:0] _s0_req_T_5_0_uop_op2_sel = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_op2_sel; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_ldst = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_ldst; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_wen = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_wen; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_ren1 = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_ren1; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_ren2 = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_ren2; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_ren3 = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_ren3; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_swap12 = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_swap12; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_swap23 = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_swap23; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_fp_ctrl_typeTagIn = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_fp_ctrl_typeTagIn; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_fp_ctrl_typeTagOut = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_fp_ctrl_typeTagOut; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_fromint = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_fromint; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_toint = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_toint; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_fastpipe = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_fastpipe; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_fma = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_fma; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_div = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_div; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_sqrt = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_sqrt; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_wflags = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_wflags; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_ctrl_vec = ~wb_fire & _s0_req_T_4_0_uop_fp_ctrl_vec; // @[dcache.scala:563:38, :616:21, :617:21]
wire [4:0] _s0_req_T_5_0_uop_rob_idx = wb_fire ? 5'h0 : _s0_req_T_4_0_uop_rob_idx; // @[dcache.scala:563:38, :616:21, :617:21]
wire [3:0] _s0_req_T_5_0_uop_ldq_idx = wb_fire ? 4'h0 : _s0_req_T_4_0_uop_ldq_idx; // @[dcache.scala:563:38, :616:21, :617:21]
wire [3:0] _s0_req_T_5_0_uop_stq_idx = wb_fire ? 4'h0 : _s0_req_T_4_0_uop_stq_idx; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_rxq_idx = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_rxq_idx; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_pdst = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_pdst; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_prs1 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_prs1; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_prs2 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_prs2; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_prs3 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_prs3; // @[dcache.scala:563:38, :616:21, :617:21]
wire [3:0] _s0_req_T_5_0_uop_ppred = wb_fire ? 4'h0 : _s0_req_T_4_0_uop_ppred; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_prs1_busy = ~wb_fire & _s0_req_T_4_0_uop_prs1_busy; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_prs2_busy = ~wb_fire & _s0_req_T_4_0_uop_prs2_busy; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_prs3_busy = ~wb_fire & _s0_req_T_4_0_uop_prs3_busy; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_ppred_busy = ~wb_fire & _s0_req_T_4_0_uop_ppred_busy; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_stale_pdst = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_stale_pdst; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_exception = ~wb_fire & _s0_req_T_4_0_uop_exception; // @[dcache.scala:563:38, :616:21, :617:21]
wire [63:0] _s0_req_T_5_0_uop_exc_cause = wb_fire ? 64'h0 : _s0_req_T_4_0_uop_exc_cause; // @[dcache.scala:563:38, :616:21, :617:21]
wire [4:0] _s0_req_T_5_0_uop_mem_cmd = wb_fire ? 5'h0 : _s0_req_T_4_0_uop_mem_cmd; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_mem_size = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_mem_size; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_mem_signed = ~wb_fire & _s0_req_T_4_0_uop_mem_signed; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_uses_ldq = ~wb_fire & _s0_req_T_4_0_uop_uses_ldq; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_uses_stq = ~wb_fire & _s0_req_T_4_0_uop_uses_stq; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_is_unique = ~wb_fire & _s0_req_T_4_0_uop_is_unique; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_flush_on_commit = ~wb_fire & _s0_req_T_4_0_uop_flush_on_commit; // @[dcache.scala:563:38, :616:21, :617:21]
wire [2:0] _s0_req_T_5_0_uop_csr_cmd = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_csr_cmd; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_ldst_is_rs1 = ~wb_fire & _s0_req_T_4_0_uop_ldst_is_rs1; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_ldst = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_ldst; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_lrs1 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_lrs1; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_lrs2 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_lrs2; // @[dcache.scala:563:38, :616:21, :617:21]
wire [5:0] _s0_req_T_5_0_uop_lrs3 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_lrs3; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_dst_rtype = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_dst_rtype; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_lrs1_rtype = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_lrs1_rtype; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_lrs2_rtype = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_lrs2_rtype; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_frs3_en = ~wb_fire & _s0_req_T_4_0_uop_frs3_en; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fcn_dw = ~wb_fire & _s0_req_T_4_0_uop_fcn_dw; // @[dcache.scala:563:38, :616:21, :617:21]
wire [4:0] _s0_req_T_5_0_uop_fcn_op = wb_fire ? 5'h0 : _s0_req_T_4_0_uop_fcn_op; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_fp_val = ~wb_fire & _s0_req_T_4_0_uop_fp_val; // @[dcache.scala:563:38, :616:21, :617:21]
wire [2:0] _s0_req_T_5_0_uop_fp_rm = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_fp_rm; // @[dcache.scala:563:38, :616:21, :617:21]
wire [1:0] _s0_req_T_5_0_uop_fp_typ = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_fp_typ; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_xcpt_pf_if = ~wb_fire & _s0_req_T_4_0_uop_xcpt_pf_if; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_xcpt_ae_if = ~wb_fire & _s0_req_T_4_0_uop_xcpt_ae_if; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_xcpt_ma_if = ~wb_fire & _s0_req_T_4_0_uop_xcpt_ma_if; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_bp_debug_if = ~wb_fire & _s0_req_T_4_0_uop_bp_debug_if; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_uop_bp_xcpt_if = ~wb_fire & _s0_req_T_4_0_uop_bp_xcpt_if; // @[dcache.scala:563:38, :616:21, :617:21]
wire [2:0] _s0_req_T_5_0_uop_debug_fsrc = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_debug_fsrc; // @[dcache.scala:563:38, :616:21, :617:21]
wire [2:0] _s0_req_T_5_0_uop_debug_tsrc = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_debug_tsrc; // @[dcache.scala:563:38, :616:21, :617:21]
wire [33:0] _s0_req_T_5_0_addr = wb_fire ? wb_req_0_addr : _s0_req_T_4_0_addr; // @[dcache.scala:563:38, :564:20, :616:21, :617:21]
wire [63:0] _s0_req_T_5_0_data = wb_fire ? 64'h0 : _s0_req_T_4_0_data; // @[dcache.scala:563:38, :616:21, :617:21]
wire _s0_req_T_5_0_is_hella = ~wb_fire & _s0_req_T_4_0_is_hella; // @[dcache.scala:563:38, :616:21, :617:21]
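  // Final stage-0 request: an incoming LSU request (_s0_req_T) wins over all internal
  // sources (replay, MSHR meta read, prober, writeback) selected above.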
wire [31:0] s0_req_0_uop_inst = _s0_req_T ? _s0_req_WIRE_0_uop_inst : _s0_req_T_5_0_uop_inst; // @[Decoupled.scala:51:35]
wire [31:0] s0_req_0_uop_debug_inst = _s0_req_T ? _s0_req_WIRE_0_uop_debug_inst : _s0_req_T_5_0_uop_debug_inst; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_rvc = _s0_req_T ? _s0_req_WIRE_0_uop_is_rvc : _s0_req_T_5_0_uop_is_rvc; // @[Decoupled.scala:51:35]
wire [33:0] s0_req_0_uop_debug_pc = _s0_req_T ? _s0_req_WIRE_0_uop_debug_pc : _s0_req_T_5_0_uop_debug_pc; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iq_type_0 = _s0_req_T ? _s0_req_WIRE_0_uop_iq_type_0 : _s0_req_T_5_0_uop_iq_type_0; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iq_type_1 = _s0_req_T ? _s0_req_WIRE_0_uop_iq_type_1 : _s0_req_T_5_0_uop_iq_type_1; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iq_type_2 = _s0_req_T ? _s0_req_WIRE_0_uop_iq_type_2 : _s0_req_T_5_0_uop_iq_type_2; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iq_type_3 = _s0_req_T ? _s0_req_WIRE_0_uop_iq_type_3 : _s0_req_T_5_0_uop_iq_type_3; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_0 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_0 : _s0_req_T_5_0_uop_fu_code_0; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_1 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_1 : _s0_req_T_5_0_uop_fu_code_1; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_2 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_2 : _s0_req_T_5_0_uop_fu_code_2; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_3 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_3 : _s0_req_T_5_0_uop_fu_code_3; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_4 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_4 : _s0_req_T_5_0_uop_fu_code_4; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_5 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_5 : _s0_req_T_5_0_uop_fu_code_5; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_6 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_6 : _s0_req_T_5_0_uop_fu_code_6; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_7 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_7 : _s0_req_T_5_0_uop_fu_code_7; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_8 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_8 : _s0_req_T_5_0_uop_fu_code_8; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fu_code_9 = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code_9 : _s0_req_T_5_0_uop_fu_code_9; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iw_issued = _s0_req_T ? _s0_req_WIRE_0_uop_iw_issued : _s0_req_T_5_0_uop_iw_issued; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iw_issued_partial_agen = _s0_req_T ? _s0_req_WIRE_0_uop_iw_issued_partial_agen : _s0_req_T_5_0_uop_iw_issued_partial_agen; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iw_issued_partial_dgen = _s0_req_T ? _s0_req_WIRE_0_uop_iw_issued_partial_dgen : _s0_req_T_5_0_uop_iw_issued_partial_dgen; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iw_p1_speculative_child = _s0_req_T ? _s0_req_WIRE_0_uop_iw_p1_speculative_child : _s0_req_T_5_0_uop_iw_p1_speculative_child; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iw_p2_speculative_child = _s0_req_T ? _s0_req_WIRE_0_uop_iw_p2_speculative_child : _s0_req_T_5_0_uop_iw_p2_speculative_child; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iw_p1_bypass_hint = _s0_req_T ? _s0_req_WIRE_0_uop_iw_p1_bypass_hint : _s0_req_T_5_0_uop_iw_p1_bypass_hint; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iw_p2_bypass_hint = _s0_req_T ? _s0_req_WIRE_0_uop_iw_p2_bypass_hint : _s0_req_T_5_0_uop_iw_p2_bypass_hint; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_iw_p3_bypass_hint = _s0_req_T ? _s0_req_WIRE_0_uop_iw_p3_bypass_hint : _s0_req_T_5_0_uop_iw_p3_bypass_hint; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_dis_col_sel = _s0_req_T ? _s0_req_WIRE_0_uop_dis_col_sel : _s0_req_T_5_0_uop_dis_col_sel; // @[Decoupled.scala:51:35]
wire [3:0] s0_req_0_uop_br_mask = _s0_req_T ? _s0_req_WIRE_0_uop_br_mask : _s0_req_T_5_0_uop_br_mask; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_br_tag = _s0_req_T ? _s0_req_WIRE_0_uop_br_tag : _s0_req_T_5_0_uop_br_tag; // @[Decoupled.scala:51:35]
wire [3:0] s0_req_0_uop_br_type = _s0_req_T ? _s0_req_WIRE_0_uop_br_type : _s0_req_T_5_0_uop_br_type; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_sfb = _s0_req_T ? _s0_req_WIRE_0_uop_is_sfb : _s0_req_T_5_0_uop_is_sfb; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_fence = _s0_req_T ? _s0_req_WIRE_0_uop_is_fence : _s0_req_T_5_0_uop_is_fence; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_fencei = _s0_req_T ? _s0_req_WIRE_0_uop_is_fencei : _s0_req_T_5_0_uop_is_fencei; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_sfence = _s0_req_T ? _s0_req_WIRE_0_uop_is_sfence : _s0_req_T_5_0_uop_is_sfence; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_amo = _s0_req_T ? _s0_req_WIRE_0_uop_is_amo : _s0_req_T_5_0_uop_is_amo; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_eret = _s0_req_T ? _s0_req_WIRE_0_uop_is_eret : _s0_req_T_5_0_uop_is_eret; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_sys_pc2epc = _s0_req_T ? _s0_req_WIRE_0_uop_is_sys_pc2epc : _s0_req_T_5_0_uop_is_sys_pc2epc; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_rocc = _s0_req_T ? _s0_req_WIRE_0_uop_is_rocc : _s0_req_T_5_0_uop_is_rocc; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_mov = _s0_req_T ? _s0_req_WIRE_0_uop_is_mov : _s0_req_T_5_0_uop_is_mov; // @[Decoupled.scala:51:35]
wire [3:0] s0_req_0_uop_ftq_idx = _s0_req_T ? _s0_req_WIRE_0_uop_ftq_idx : _s0_req_T_5_0_uop_ftq_idx; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_edge_inst = _s0_req_T ? _s0_req_WIRE_0_uop_edge_inst : _s0_req_T_5_0_uop_edge_inst; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_pc_lob = _s0_req_T ? _s0_req_WIRE_0_uop_pc_lob : _s0_req_T_5_0_uop_pc_lob; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_taken = _s0_req_T ? _s0_req_WIRE_0_uop_taken : _s0_req_T_5_0_uop_taken; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_imm_rename = _s0_req_T ? _s0_req_WIRE_0_uop_imm_rename : _s0_req_T_5_0_uop_imm_rename; // @[Decoupled.scala:51:35]
wire [2:0] s0_req_0_uop_imm_sel = _s0_req_T ? _s0_req_WIRE_0_uop_imm_sel : _s0_req_T_5_0_uop_imm_sel; // @[Decoupled.scala:51:35]
wire [4:0] s0_req_0_uop_pimm = _s0_req_T ? _s0_req_WIRE_0_uop_pimm : _s0_req_T_5_0_uop_pimm; // @[Decoupled.scala:51:35]
wire [19:0] s0_req_0_uop_imm_packed = _s0_req_T ? _s0_req_WIRE_0_uop_imm_packed : _s0_req_T_5_0_uop_imm_packed; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_op1_sel = _s0_req_T ? _s0_req_WIRE_0_uop_op1_sel : _s0_req_T_5_0_uop_op1_sel; // @[Decoupled.scala:51:35]
wire [2:0] s0_req_0_uop_op2_sel = _s0_req_T ? _s0_req_WIRE_0_uop_op2_sel : _s0_req_T_5_0_uop_op2_sel; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_ldst = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_ldst : _s0_req_T_5_0_uop_fp_ctrl_ldst; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_wen = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_wen : _s0_req_T_5_0_uop_fp_ctrl_wen; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_ren1 = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_ren1 : _s0_req_T_5_0_uop_fp_ctrl_ren1; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_ren2 = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_ren2 : _s0_req_T_5_0_uop_fp_ctrl_ren2; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_ren3 = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_ren3 : _s0_req_T_5_0_uop_fp_ctrl_ren3; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_swap12 = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_swap12 : _s0_req_T_5_0_uop_fp_ctrl_swap12; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_swap23 = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_swap23 : _s0_req_T_5_0_uop_fp_ctrl_swap23; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_fp_ctrl_typeTagIn = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_typeTagIn : _s0_req_T_5_0_uop_fp_ctrl_typeTagIn; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_fp_ctrl_typeTagOut = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_typeTagOut : _s0_req_T_5_0_uop_fp_ctrl_typeTagOut; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_fromint = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_fromint : _s0_req_T_5_0_uop_fp_ctrl_fromint; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_toint = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_toint : _s0_req_T_5_0_uop_fp_ctrl_toint; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_fastpipe = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_fastpipe : _s0_req_T_5_0_uop_fp_ctrl_fastpipe; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_fma = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_fma : _s0_req_T_5_0_uop_fp_ctrl_fma; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_div = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_div : _s0_req_T_5_0_uop_fp_ctrl_div; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_sqrt = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_sqrt : _s0_req_T_5_0_uop_fp_ctrl_sqrt; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_wflags = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_wflags : _s0_req_T_5_0_uop_fp_ctrl_wflags; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_ctrl_vec = _s0_req_T ? _s0_req_WIRE_0_uop_fp_ctrl_vec : _s0_req_T_5_0_uop_fp_ctrl_vec; // @[Decoupled.scala:51:35]
wire [4:0] s0_req_0_uop_rob_idx = _s0_req_T ? _s0_req_WIRE_0_uop_rob_idx : _s0_req_T_5_0_uop_rob_idx; // @[Decoupled.scala:51:35]
wire [3:0] s0_req_0_uop_ldq_idx = _s0_req_T ? _s0_req_WIRE_0_uop_ldq_idx : _s0_req_T_5_0_uop_ldq_idx; // @[Decoupled.scala:51:35]
wire [3:0] s0_req_0_uop_stq_idx = _s0_req_T ? _s0_req_WIRE_0_uop_stq_idx : _s0_req_T_5_0_uop_stq_idx; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_rxq_idx = _s0_req_T ? _s0_req_WIRE_0_uop_rxq_idx : _s0_req_T_5_0_uop_rxq_idx; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_pdst = _s0_req_T ? _s0_req_WIRE_0_uop_pdst : _s0_req_T_5_0_uop_pdst; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_prs1 = _s0_req_T ? _s0_req_WIRE_0_uop_prs1 : _s0_req_T_5_0_uop_prs1; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_prs2 = _s0_req_T ? _s0_req_WIRE_0_uop_prs2 : _s0_req_T_5_0_uop_prs2; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_prs3 = _s0_req_T ? _s0_req_WIRE_0_uop_prs3 : _s0_req_T_5_0_uop_prs3; // @[Decoupled.scala:51:35]
wire [3:0] s0_req_0_uop_ppred = _s0_req_T ? _s0_req_WIRE_0_uop_ppred : _s0_req_T_5_0_uop_ppred; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_prs1_busy = _s0_req_T ? _s0_req_WIRE_0_uop_prs1_busy : _s0_req_T_5_0_uop_prs1_busy; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_prs2_busy = _s0_req_T ? _s0_req_WIRE_0_uop_prs2_busy : _s0_req_T_5_0_uop_prs2_busy; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_prs3_busy = _s0_req_T ? _s0_req_WIRE_0_uop_prs3_busy : _s0_req_T_5_0_uop_prs3_busy; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_ppred_busy = _s0_req_T ? _s0_req_WIRE_0_uop_ppred_busy : _s0_req_T_5_0_uop_ppred_busy; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_stale_pdst = _s0_req_T ? _s0_req_WIRE_0_uop_stale_pdst : _s0_req_T_5_0_uop_stale_pdst; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_exception = _s0_req_T ? _s0_req_WIRE_0_uop_exception : _s0_req_T_5_0_uop_exception; // @[Decoupled.scala:51:35]
wire [63:0] s0_req_0_uop_exc_cause = _s0_req_T ? _s0_req_WIRE_0_uop_exc_cause : _s0_req_T_5_0_uop_exc_cause; // @[Decoupled.scala:51:35]
wire [4:0] s0_req_0_uop_mem_cmd = _s0_req_T ? _s0_req_WIRE_0_uop_mem_cmd : _s0_req_T_5_0_uop_mem_cmd; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_mem_size = _s0_req_T ? _s0_req_WIRE_0_uop_mem_size : _s0_req_T_5_0_uop_mem_size; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_mem_signed = _s0_req_T ? _s0_req_WIRE_0_uop_mem_signed : _s0_req_T_5_0_uop_mem_signed; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_uses_ldq = _s0_req_T ? _s0_req_WIRE_0_uop_uses_ldq : _s0_req_T_5_0_uop_uses_ldq; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_uses_stq = _s0_req_T ? _s0_req_WIRE_0_uop_uses_stq : _s0_req_T_5_0_uop_uses_stq; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_is_unique = _s0_req_T ? _s0_req_WIRE_0_uop_is_unique : _s0_req_T_5_0_uop_is_unique; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_flush_on_commit = _s0_req_T ? _s0_req_WIRE_0_uop_flush_on_commit : _s0_req_T_5_0_uop_flush_on_commit; // @[Decoupled.scala:51:35]
wire [2:0] s0_req_0_uop_csr_cmd = _s0_req_T ? _s0_req_WIRE_0_uop_csr_cmd : _s0_req_T_5_0_uop_csr_cmd; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_ldst_is_rs1 = _s0_req_T ? _s0_req_WIRE_0_uop_ldst_is_rs1 : _s0_req_T_5_0_uop_ldst_is_rs1; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_ldst = _s0_req_T ? _s0_req_WIRE_0_uop_ldst : _s0_req_T_5_0_uop_ldst; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_lrs1 = _s0_req_T ? _s0_req_WIRE_0_uop_lrs1 : _s0_req_T_5_0_uop_lrs1; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_lrs2 = _s0_req_T ? _s0_req_WIRE_0_uop_lrs2 : _s0_req_T_5_0_uop_lrs2; // @[Decoupled.scala:51:35]
wire [5:0] s0_req_0_uop_lrs3 = _s0_req_T ? _s0_req_WIRE_0_uop_lrs3 : _s0_req_T_5_0_uop_lrs3; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_dst_rtype = _s0_req_T ? _s0_req_WIRE_0_uop_dst_rtype : _s0_req_T_5_0_uop_dst_rtype; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_lrs1_rtype = _s0_req_T ? _s0_req_WIRE_0_uop_lrs1_rtype : _s0_req_T_5_0_uop_lrs1_rtype; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_lrs2_rtype = _s0_req_T ? _s0_req_WIRE_0_uop_lrs2_rtype : _s0_req_T_5_0_uop_lrs2_rtype; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_frs3_en = _s0_req_T ? _s0_req_WIRE_0_uop_frs3_en : _s0_req_T_5_0_uop_frs3_en; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fcn_dw = _s0_req_T ? _s0_req_WIRE_0_uop_fcn_dw : _s0_req_T_5_0_uop_fcn_dw; // @[Decoupled.scala:51:35]
wire [4:0] s0_req_0_uop_fcn_op = _s0_req_T ? _s0_req_WIRE_0_uop_fcn_op : _s0_req_T_5_0_uop_fcn_op; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_fp_val = _s0_req_T ? _s0_req_WIRE_0_uop_fp_val : _s0_req_T_5_0_uop_fp_val; // @[Decoupled.scala:51:35]
wire [2:0] s0_req_0_uop_fp_rm = _s0_req_T ? _s0_req_WIRE_0_uop_fp_rm : _s0_req_T_5_0_uop_fp_rm; // @[Decoupled.scala:51:35]
wire [1:0] s0_req_0_uop_fp_typ = _s0_req_T ? _s0_req_WIRE_0_uop_fp_typ : _s0_req_T_5_0_uop_fp_typ; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_xcpt_pf_if = _s0_req_T ? _s0_req_WIRE_0_uop_xcpt_pf_if : _s0_req_T_5_0_uop_xcpt_pf_if; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_xcpt_ae_if = _s0_req_T ? _s0_req_WIRE_0_uop_xcpt_ae_if : _s0_req_T_5_0_uop_xcpt_ae_if; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_xcpt_ma_if = _s0_req_T ? _s0_req_WIRE_0_uop_xcpt_ma_if : _s0_req_T_5_0_uop_xcpt_ma_if; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_bp_debug_if = _s0_req_T ? _s0_req_WIRE_0_uop_bp_debug_if : _s0_req_T_5_0_uop_bp_debug_if; // @[Decoupled.scala:51:35]
wire s0_req_0_uop_bp_xcpt_if = _s0_req_T ? _s0_req_WIRE_0_uop_bp_xcpt_if : _s0_req_T_5_0_uop_bp_xcpt_if; // @[Decoupled.scala:51:35]
wire [2:0] s0_req_0_uop_debug_fsrc = _s0_req_T ? _s0_req_WIRE_0_uop_debug_fsrc : _s0_req_T_5_0_uop_debug_fsrc; // @[Decoupled.scala:51:35]
wire [2:0] s0_req_0_uop_debug_tsrc = _s0_req_T ? _s0_req_WIRE_0_uop_debug_tsrc : _s0_req_T_5_0_uop_debug_tsrc; // @[Decoupled.scala:51:35]
wire [33:0] s0_req_0_addr = _s0_req_T ? _s0_req_WIRE_0_addr : _s0_req_T_5_0_addr; // @[Decoupled.scala:51:35]
wire [63:0] s0_req_0_data = _s0_req_T ? _s0_req_WIRE_0_data : _s0_req_T_5_0_data; // @[Decoupled.scala:51:35]
wire s0_req_0_is_hella = _s0_req_T ? _s0_req_WIRE_0_is_hella : _s0_req_T_5_0_is_hella; // @[Decoupled.scala:51:35]
wire [3:0] _s1_req_0_uop_br_mask_T_1 = s0_req_0_uop_br_mask; // @[util.scala:93:25]
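  // Stage-0 request-type encoding; the values appear to follow the dcache's internal
  // ordering (0 = replay, 1 = probe, 2 = writeback, 3 = MSHR meta read, 4 = LSU request).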
wire [2:0] _s0_type_T_2 = _s0_type_T_1 ? 3'h3 : 3'h0; // @[Decoupled.scala:51:35]
wire [2:0] _s0_type_T_3 = _s0_type_T_2; // @[dcache.scala:624:21, :625:21]
wire [2:0] _s0_type_T_4 = prober_fire ? 3'h1 : _s0_type_T_3; // @[Decoupled.scala:51:35]
wire [2:0] _s0_type_T_5 = wb_fire ? 3'h2 : _s0_type_T_4; // @[dcache.scala:563:38, :622:21, :623:21]
wire [2:0] s0_type = _s0_type_T ? 3'h4 : _s0_type_T_5; // @[Decoupled.scala:51:35]
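  // Decode of the replayed uop's mem_cmd: the equality chain below compares it against a
  // set of memory-command encodings (read / load-reserved / AMO variants, most likely) to
  // decide whether this replay must eventually send a response or nack back to the LSU.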
wire _s0_send_resp_or_nack_T_2 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h0; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_3 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h10; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_4 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h6; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_5 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h7; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_6 = _s0_send_resp_or_nack_T_2 | _s0_send_resp_or_nack_T_3; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_7 = _s0_send_resp_or_nack_T_6 | _s0_send_resp_or_nack_T_4; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_8 = _s0_send_resp_or_nack_T_7 | _s0_send_resp_or_nack_T_5; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_9 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h4; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_10 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h9; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_11 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hA; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_12 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hB; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_13 = _s0_send_resp_or_nack_T_9 | _s0_send_resp_or_nack_T_10; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_14 = _s0_send_resp_or_nack_T_13 | _s0_send_resp_or_nack_T_11; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_15 = _s0_send_resp_or_nack_T_14 | _s0_send_resp_or_nack_T_12; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_16 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h8; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_17 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hC; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_18 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hD; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_19 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hE; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_20 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hF; // @[package.scala:16:47]
wire _s0_send_resp_or_nack_T_21 = _s0_send_resp_or_nack_T_16 | _s0_send_resp_or_nack_T_17; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_22 = _s0_send_resp_or_nack_T_21 | _s0_send_resp_or_nack_T_18; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_23 = _s0_send_resp_or_nack_T_22 | _s0_send_resp_or_nack_T_19; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_24 = _s0_send_resp_or_nack_T_23 | _s0_send_resp_or_nack_T_20; // @[package.scala:16:47, :81:59]
wire _s0_send_resp_or_nack_T_25 = _s0_send_resp_or_nack_T_15 | _s0_send_resp_or_nack_T_24; // @[package.scala:81:59]
wire _s0_send_resp_or_nack_T_26 = _s0_send_resp_or_nack_T_8 | _s0_send_resp_or_nack_T_25; // @[package.scala:81:59]
wire _s0_send_resp_or_nack_T_27 = _s0_send_resp_or_nack_T_1 & _s0_send_resp_or_nack_T_26; // @[Decoupled.scala:51:35]
wire _s0_send_resp_or_nack_T_28 = _s0_send_resp_or_nack_T_27; // @[dcache.scala:630:{16,38}]
wire _s0_send_resp_or_nack_T_29 = _s0_send_resp_or_nack_T_28; // @[dcache.scala:630:{16,117}]
wire _s0_send_resp_or_nack_WIRE_0 = _s0_send_resp_or_nack_T_29; // @[dcache.scala:630:{12,117}]
wire s0_send_resp_or_nack_0 = _s0_send_resp_or_nack_T ? s0_valid_0 : _s0_send_resp_or_nack_WIRE_0; // @[Decoupled.scala:51:35]
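  // Stage-1 pipeline registers: the selected stage-0 request is captured here for the
  // tag/metadata lookup in the following cycle.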
reg [31:0] s1_req_0_uop_inst; // @[dcache.scala:633:32]
reg [31:0] s1_req_0_uop_debug_inst; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_rvc; // @[dcache.scala:633:32]
reg [33:0] s1_req_0_uop_debug_pc; // @[dcache.scala:633:32]
reg s1_req_0_uop_iq_type_0; // @[dcache.scala:633:32]
reg s1_req_0_uop_iq_type_1; // @[dcache.scala:633:32]
reg s1_req_0_uop_iq_type_2; // @[dcache.scala:633:32]
reg s1_req_0_uop_iq_type_3; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_0; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_1; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_2; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_3; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_4; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_5; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_6; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_7; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_8; // @[dcache.scala:633:32]
reg s1_req_0_uop_fu_code_9; // @[dcache.scala:633:32]
reg s1_req_0_uop_iw_issued; // @[dcache.scala:633:32]
reg s1_req_0_uop_iw_issued_partial_agen; // @[dcache.scala:633:32]
reg s1_req_0_uop_iw_issued_partial_dgen; // @[dcache.scala:633:32]
reg s1_req_0_uop_iw_p1_speculative_child; // @[dcache.scala:633:32]
reg s1_req_0_uop_iw_p2_speculative_child; // @[dcache.scala:633:32]
reg s1_req_0_uop_iw_p1_bypass_hint; // @[dcache.scala:633:32]
reg s1_req_0_uop_iw_p2_bypass_hint; // @[dcache.scala:633:32]
reg s1_req_0_uop_iw_p3_bypass_hint; // @[dcache.scala:633:32]
reg s1_req_0_uop_dis_col_sel; // @[dcache.scala:633:32]
reg [3:0] s1_req_0_uop_br_mask; // @[dcache.scala:633:32]
wire [3:0] _s2_req_0_uop_br_mask_T_1 = s1_req_0_uop_br_mask; // @[util.scala:93:25]
reg [1:0] s1_req_0_uop_br_tag; // @[dcache.scala:633:32]
reg [3:0] s1_req_0_uop_br_type; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_sfb; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_fence; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_fencei; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_sfence; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_amo; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_eret; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_sys_pc2epc; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_rocc; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_mov; // @[dcache.scala:633:32]
reg [3:0] s1_req_0_uop_ftq_idx; // @[dcache.scala:633:32]
reg s1_req_0_uop_edge_inst; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_pc_lob; // @[dcache.scala:633:32]
reg s1_req_0_uop_taken; // @[dcache.scala:633:32]
reg s1_req_0_uop_imm_rename; // @[dcache.scala:633:32]
reg [2:0] s1_req_0_uop_imm_sel; // @[dcache.scala:633:32]
reg [4:0] s1_req_0_uop_pimm; // @[dcache.scala:633:32]
reg [19:0] s1_req_0_uop_imm_packed; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_op1_sel; // @[dcache.scala:633:32]
reg [2:0] s1_req_0_uop_op2_sel; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_ldst; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_wen; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_ren1; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_ren2; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_ren3; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_swap12; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_swap23; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_fp_ctrl_typeTagIn; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_fp_ctrl_typeTagOut; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_fromint; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_toint; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_fastpipe; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_fma; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_div; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_sqrt; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_wflags; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_ctrl_vec; // @[dcache.scala:633:32]
reg [4:0] s1_req_0_uop_rob_idx; // @[dcache.scala:633:32]
reg [3:0] s1_req_0_uop_ldq_idx; // @[dcache.scala:633:32]
reg [3:0] s1_req_0_uop_stq_idx; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_rxq_idx; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_pdst; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_prs1; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_prs2; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_prs3; // @[dcache.scala:633:32]
reg [3:0] s1_req_0_uop_ppred; // @[dcache.scala:633:32]
reg s1_req_0_uop_prs1_busy; // @[dcache.scala:633:32]
reg s1_req_0_uop_prs2_busy; // @[dcache.scala:633:32]
reg s1_req_0_uop_prs3_busy; // @[dcache.scala:633:32]
reg s1_req_0_uop_ppred_busy; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_stale_pdst; // @[dcache.scala:633:32]
reg s1_req_0_uop_exception; // @[dcache.scala:633:32]
reg [63:0] s1_req_0_uop_exc_cause; // @[dcache.scala:633:32]
reg [4:0] s1_req_0_uop_mem_cmd; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_mem_size; // @[dcache.scala:633:32]
reg s1_req_0_uop_mem_signed; // @[dcache.scala:633:32]
reg s1_req_0_uop_uses_ldq; // @[dcache.scala:633:32]
reg s1_req_0_uop_uses_stq; // @[dcache.scala:633:32]
reg s1_req_0_uop_is_unique; // @[dcache.scala:633:32]
reg s1_req_0_uop_flush_on_commit; // @[dcache.scala:633:32]
reg [2:0] s1_req_0_uop_csr_cmd; // @[dcache.scala:633:32]
reg s1_req_0_uop_ldst_is_rs1; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_ldst; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_lrs1; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_lrs2; // @[dcache.scala:633:32]
reg [5:0] s1_req_0_uop_lrs3; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_dst_rtype; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_lrs1_rtype; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_lrs2_rtype; // @[dcache.scala:633:32]
reg s1_req_0_uop_frs3_en; // @[dcache.scala:633:32]
reg s1_req_0_uop_fcn_dw; // @[dcache.scala:633:32]
reg [4:0] s1_req_0_uop_fcn_op; // @[dcache.scala:633:32]
reg s1_req_0_uop_fp_val; // @[dcache.scala:633:32]
reg [2:0] s1_req_0_uop_fp_rm; // @[dcache.scala:633:32]
reg [1:0] s1_req_0_uop_fp_typ; // @[dcache.scala:633:32]
reg s1_req_0_uop_xcpt_pf_if; // @[dcache.scala:633:32]
reg s1_req_0_uop_xcpt_ae_if; // @[dcache.scala:633:32]
reg s1_req_0_uop_xcpt_ma_if; // @[dcache.scala:633:32]
reg s1_req_0_uop_bp_debug_if; // @[dcache.scala:633:32]
reg s1_req_0_uop_bp_xcpt_if; // @[dcache.scala:633:32]
reg [2:0] s1_req_0_uop_debug_fsrc; // @[dcache.scala:633:32]
reg [2:0] s1_req_0_uop_debug_tsrc; // @[dcache.scala:633:32]
reg [33:0] s1_req_0_addr; // @[dcache.scala:633:32]
reg [63:0] s1_req_0_data; // @[dcache.scala:633:32]
reg s1_req_0_is_hella; // @[dcache.scala:633:32]
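  // Stage-1 valid/nack tracking: a stage-0 store is killed when a store failed in stage 2,
  // and the request is nacked when its index matches the prober's pending metadata write
  // while the prober is busy (~_prober_io_req_ready).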
wire _s2_store_failed_T_2; // @[dcache.scala:787:67]
wire s2_store_failed; // @[dcache.scala:636:29]
wire _s1_valid_T_7 = _s1_valid_T_4; // @[dcache.scala:638:74, :639:85]
wire _s1_valid_T_9 = s2_store_failed & _s1_valid_T_8; // @[Decoupled.scala:51:35]
wire _s1_valid_T_10 = _s1_valid_T_9 & s0_req_0_uop_uses_stq; // @[dcache.scala:615:21, :641:{44,63}]
wire _s1_valid_T_11 = ~_s1_valid_T_10; // @[dcache.scala:641:{26,63}]
wire _s1_valid_T_12 = _s1_valid_T_7 & _s1_valid_T_11; // @[dcache.scala:639:85, :640:74, :641:26]
reg s1_valid_REG; // @[dcache.scala:638:25]
wire s1_valid_0 = s1_valid_REG; // @[dcache.scala:454:49, :638:25]
reg REG; // @[dcache.scala:645:43]
reg REG_1; // @[dcache.scala:645:72]
wire [3:0] _s1_nack_T = s1_req_0_addr[9:6]; // @[dcache.scala:633:32, :647:43]
wire [3:0] _s1_wb_idx_matches_T = s1_req_0_addr[9:6]; // @[dcache.scala:633:32, :647:43, :664:52]
wire _s1_nack_T_1 = _s1_nack_T == _prober_io_meta_write_bits_idx; // @[dcache.scala:459:22, :647:{43,59}]
wire _s1_nack_T_2 = ~_prober_io_req_ready; // @[dcache.scala:459:22, :647:96]
wire s1_nack_0 = _s1_nack_T_1 & _s1_nack_T_2; // @[dcache.scala:647:{59,93,96}]
wire _s2_nack_hit_WIRE_0 = s1_nack_0; // @[dcache.scala:647:93, :760:39]
reg s1_send_resp_or_nack_0; // @[dcache.scala:648:37]
reg [2:0] s1_type; // @[dcache.scala:649:32]
reg [1:0] s1_mshr_meta_read_way_en; // @[dcache.scala:651:41]
reg [1:0] s1_replay_way_en; // @[dcache.scala:652:41]
reg [1:0] s1_wb_way_en; // @[dcache.scala:653:41]
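  // Stage-1 tag comparison: each way's metadata tag is compared against the request
  // address and the hit is qualified by a non-zero coherence state; replay, writeback and
  // MSHR meta-read requests instead reuse their recorded way-enable vectors.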
wire [23:0] _s1_tag_eq_way_T = s1_req_0_addr[33:10]; // @[dcache.scala:633:32, :657:95]
wire [23:0] _s1_tag_eq_way_T_2 = s1_req_0_addr[33:10]; // @[dcache.scala:633:32, :657:95]
wire _s1_tag_eq_way_T_1 = {2'h0, _meta_0_io_resp_0_tag} == _s1_tag_eq_way_T; // @[dcache.scala:469:41, :657:{79,95}]
wire _s1_tag_eq_way_WIRE_0 = _s1_tag_eq_way_T_1; // @[dcache.scala:656:47, :657:79]
wire _s1_tag_eq_way_T_3 = {2'h0, _meta_0_io_resp_1_tag} == _s1_tag_eq_way_T_2; // @[dcache.scala:469:41, :657:{79,95}]
wire _s1_tag_eq_way_WIRE_1 = _s1_tag_eq_way_T_3; // @[dcache.scala:656:47, :657:79]
wire [1:0] _s1_tag_eq_way_T_4 = {_s1_tag_eq_way_WIRE_1, _s1_tag_eq_way_WIRE_0}; // @[dcache.scala:656:47, :657:110]
wire [1:0] s1_tag_eq_way_0 = _s1_tag_eq_way_T_4; // @[dcache.scala:454:49, :657:110]
wire _s1_tag_match_way_T = s1_type == 3'h0; // @[dcache.scala:649:32, :659:38]
wire _s1_tag_match_way_T_1 = s1_type == 3'h2; // @[dcache.scala:649:32, :660:38]
wire _s1_tag_match_way_T_2 = s1_type == 3'h3; // @[dcache.scala:649:32, :661:38]
wire _s1_tag_match_way_T_3 = s1_tag_eq_way_0[0]; // @[dcache.scala:454:49, :662:63]
wire _s1_tag_match_way_T_4 = |_meta_0_io_resp_0_coh_state; // @[Metadata.scala:50:45]
wire _s1_tag_match_way_T_5 = _s1_tag_match_way_T_3 & _s1_tag_match_way_T_4; // @[Metadata.scala:50:45]
wire _s1_tag_match_way_WIRE_0 = _s1_tag_match_way_T_5; // @[dcache.scala:656:47, :662:67]
wire _s1_tag_match_way_T_6 = s1_tag_eq_way_0[1]; // @[dcache.scala:454:49, :662:63]
wire _s1_tag_match_way_T_7 = |_meta_0_io_resp_1_coh_state; // @[Metadata.scala:50:45]
wire _s1_tag_match_way_T_8 = _s1_tag_match_way_T_6 & _s1_tag_match_way_T_7; // @[Metadata.scala:50:45]
wire _s1_tag_match_way_WIRE_1 = _s1_tag_match_way_T_8; // @[dcache.scala:656:47, :662:67]
wire [1:0] _s1_tag_match_way_T_9 = {_s1_tag_match_way_WIRE_1, _s1_tag_match_way_WIRE_0}; // @[dcache.scala:656:47, :662:104]
wire [1:0] _s1_tag_match_way_T_10 = _s1_tag_match_way_T_2 ? s1_mshr_meta_read_way_en : _s1_tag_match_way_T_9; // @[dcache.scala:651:41, :661:{29,38}, :662:104]
wire [1:0] _s1_tag_match_way_T_11 = _s1_tag_match_way_T_1 ? s1_wb_way_en : _s1_tag_match_way_T_10; // @[dcache.scala:653:41, :660:{29,38}, :661:29]
wire [1:0] _s1_tag_match_way_T_12 = _s1_tag_match_way_T ? s1_replay_way_en : _s1_tag_match_way_T_11; // @[dcache.scala:652:41, :659:{29,38}, :660:29]
wire [1:0] s1_tag_match_way_0 = _s1_tag_match_way_T_12; // @[dcache.scala:454:49, :659:29]
wire _s1_wb_idx_matches_T_1 = _s1_wb_idx_matches_T == _wb_io_idx_bits; // @[dcache.scala:458:18, :664:{52,79}]
wire _s1_wb_idx_matches_T_2 = _s1_wb_idx_matches_T_1 & _wb_io_idx_valid; // @[dcache.scala:458:18, :664:{79,99}]
wire s1_wb_idx_matches_0 = _s1_wb_idx_matches_T_2; // @[dcache.scala:454:49, :664:99]
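  // Stage-2 pipeline registers (dcache.scala:670): each field of the stage-1 request,
  // including the full micro-op, is captured below and fanned out to the io_lsu resp,
  // store_ack, and nack bundles.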
reg [31:0] s2_req_0_uop_inst; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_inst_0 = s2_req_0_uop_inst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_inst_0 = s2_req_0_uop_inst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_inst_0 = s2_req_0_uop_inst; // @[dcache.scala:438:7, :670:25]
reg [31:0] s2_req_0_uop_debug_inst; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_debug_inst_0 = s2_req_0_uop_debug_inst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_debug_inst_0 = s2_req_0_uop_debug_inst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_debug_inst_0 = s2_req_0_uop_debug_inst; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_rvc; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_rvc_0 = s2_req_0_uop_is_rvc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_rvc_0 = s2_req_0_uop_is_rvc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_rvc_0 = s2_req_0_uop_is_rvc; // @[dcache.scala:438:7, :670:25]
reg [33:0] s2_req_0_uop_debug_pc; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_debug_pc_0 = s2_req_0_uop_debug_pc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_debug_pc_0 = s2_req_0_uop_debug_pc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_debug_pc_0 = s2_req_0_uop_debug_pc; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iq_type_0; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iq_type_0_0 = s2_req_0_uop_iq_type_0; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iq_type_0_0 = s2_req_0_uop_iq_type_0; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iq_type_0_0 = s2_req_0_uop_iq_type_0; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iq_type_1; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iq_type_1_0 = s2_req_0_uop_iq_type_1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iq_type_1_0 = s2_req_0_uop_iq_type_1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iq_type_1_0 = s2_req_0_uop_iq_type_1; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iq_type_2; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iq_type_2_0 = s2_req_0_uop_iq_type_2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iq_type_2_0 = s2_req_0_uop_iq_type_2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iq_type_2_0 = s2_req_0_uop_iq_type_2; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iq_type_3; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iq_type_3_0 = s2_req_0_uop_iq_type_3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iq_type_3_0 = s2_req_0_uop_iq_type_3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iq_type_3_0 = s2_req_0_uop_iq_type_3; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_0; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_0_0 = s2_req_0_uop_fu_code_0; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_0_0 = s2_req_0_uop_fu_code_0; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_0_0 = s2_req_0_uop_fu_code_0; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_1; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_1_0 = s2_req_0_uop_fu_code_1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_1_0 = s2_req_0_uop_fu_code_1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_1_0 = s2_req_0_uop_fu_code_1; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_2; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_2_0 = s2_req_0_uop_fu_code_2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_2_0 = s2_req_0_uop_fu_code_2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_2_0 = s2_req_0_uop_fu_code_2; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_3; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_3_0 = s2_req_0_uop_fu_code_3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_3_0 = s2_req_0_uop_fu_code_3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_3_0 = s2_req_0_uop_fu_code_3; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_4; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_4_0 = s2_req_0_uop_fu_code_4; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_4_0 = s2_req_0_uop_fu_code_4; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_4_0 = s2_req_0_uop_fu_code_4; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_5; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_5_0 = s2_req_0_uop_fu_code_5; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_5_0 = s2_req_0_uop_fu_code_5; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_5_0 = s2_req_0_uop_fu_code_5; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_6; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_6_0 = s2_req_0_uop_fu_code_6; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_6_0 = s2_req_0_uop_fu_code_6; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_6_0 = s2_req_0_uop_fu_code_6; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_7; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_7_0 = s2_req_0_uop_fu_code_7; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_7_0 = s2_req_0_uop_fu_code_7; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_7_0 = s2_req_0_uop_fu_code_7; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_8; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_8_0 = s2_req_0_uop_fu_code_8; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_8_0 = s2_req_0_uop_fu_code_8; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_8_0 = s2_req_0_uop_fu_code_8; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fu_code_9; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fu_code_9_0 = s2_req_0_uop_fu_code_9; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fu_code_9_0 = s2_req_0_uop_fu_code_9; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fu_code_9_0 = s2_req_0_uop_fu_code_9; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iw_issued; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iw_issued_0 = s2_req_0_uop_iw_issued; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iw_issued_0 = s2_req_0_uop_iw_issued; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iw_issued_0 = s2_req_0_uop_iw_issued; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iw_issued_partial_agen; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iw_issued_partial_agen_0 = s2_req_0_uop_iw_issued_partial_agen; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iw_issued_partial_agen_0 = s2_req_0_uop_iw_issued_partial_agen; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iw_issued_partial_agen_0 = s2_req_0_uop_iw_issued_partial_agen; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iw_issued_partial_dgen; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iw_issued_partial_dgen_0 = s2_req_0_uop_iw_issued_partial_dgen; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iw_issued_partial_dgen_0 = s2_req_0_uop_iw_issued_partial_dgen; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iw_issued_partial_dgen_0 = s2_req_0_uop_iw_issued_partial_dgen; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iw_p1_speculative_child; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iw_p1_speculative_child_0 = s2_req_0_uop_iw_p1_speculative_child; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iw_p1_speculative_child_0 = s2_req_0_uop_iw_p1_speculative_child; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iw_p1_speculative_child_0 = s2_req_0_uop_iw_p1_speculative_child; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iw_p2_speculative_child; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iw_p2_speculative_child_0 = s2_req_0_uop_iw_p2_speculative_child; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iw_p2_speculative_child_0 = s2_req_0_uop_iw_p2_speculative_child; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iw_p2_speculative_child_0 = s2_req_0_uop_iw_p2_speculative_child; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iw_p1_bypass_hint; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iw_p1_bypass_hint_0 = s2_req_0_uop_iw_p1_bypass_hint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iw_p1_bypass_hint_0 = s2_req_0_uop_iw_p1_bypass_hint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iw_p1_bypass_hint_0 = s2_req_0_uop_iw_p1_bypass_hint; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iw_p2_bypass_hint; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iw_p2_bypass_hint_0 = s2_req_0_uop_iw_p2_bypass_hint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iw_p2_bypass_hint_0 = s2_req_0_uop_iw_p2_bypass_hint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iw_p2_bypass_hint_0 = s2_req_0_uop_iw_p2_bypass_hint; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_iw_p3_bypass_hint; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_iw_p3_bypass_hint_0 = s2_req_0_uop_iw_p3_bypass_hint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_iw_p3_bypass_hint_0 = s2_req_0_uop_iw_p3_bypass_hint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_iw_p3_bypass_hint_0 = s2_req_0_uop_iw_p3_bypass_hint; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_dis_col_sel; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_dis_col_sel_0 = s2_req_0_uop_dis_col_sel; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_dis_col_sel_0 = s2_req_0_uop_dis_col_sel; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_dis_col_sel_0 = s2_req_0_uop_dis_col_sel; // @[dcache.scala:438:7, :670:25]
reg [3:0] s2_req_0_uop_br_mask; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_br_mask_0 = s2_req_0_uop_br_mask; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_br_mask_0 = s2_req_0_uop_br_mask; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_br_mask_0 = s2_req_0_uop_br_mask; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_br_tag; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_br_tag_0 = s2_req_0_uop_br_tag; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_br_tag_0 = s2_req_0_uop_br_tag; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_br_tag_0 = s2_req_0_uop_br_tag; // @[dcache.scala:438:7, :670:25]
reg [3:0] s2_req_0_uop_br_type; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_br_type_0 = s2_req_0_uop_br_type; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_br_type_0 = s2_req_0_uop_br_type; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_br_type_0 = s2_req_0_uop_br_type; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_sfb; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_sfb_0 = s2_req_0_uop_is_sfb; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_sfb_0 = s2_req_0_uop_is_sfb; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_sfb_0 = s2_req_0_uop_is_sfb; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_fence; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_fence_0 = s2_req_0_uop_is_fence; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_fence_0 = s2_req_0_uop_is_fence; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_fence_0 = s2_req_0_uop_is_fence; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_fencei; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_fencei_0 = s2_req_0_uop_is_fencei; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_fencei_0 = s2_req_0_uop_is_fencei; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_fencei_0 = s2_req_0_uop_is_fencei; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_sfence; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_sfence_0 = s2_req_0_uop_is_sfence; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_sfence_0 = s2_req_0_uop_is_sfence; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_sfence_0 = s2_req_0_uop_is_sfence; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_amo; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_amo_0 = s2_req_0_uop_is_amo; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_amo_0 = s2_req_0_uop_is_amo; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_amo_0 = s2_req_0_uop_is_amo; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_eret; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_eret_0 = s2_req_0_uop_is_eret; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_eret_0 = s2_req_0_uop_is_eret; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_eret_0 = s2_req_0_uop_is_eret; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_sys_pc2epc; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_sys_pc2epc_0 = s2_req_0_uop_is_sys_pc2epc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_sys_pc2epc_0 = s2_req_0_uop_is_sys_pc2epc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_sys_pc2epc_0 = s2_req_0_uop_is_sys_pc2epc; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_rocc; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_rocc_0 = s2_req_0_uop_is_rocc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_rocc_0 = s2_req_0_uop_is_rocc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_rocc_0 = s2_req_0_uop_is_rocc; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_mov; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_mov_0 = s2_req_0_uop_is_mov; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_mov_0 = s2_req_0_uop_is_mov; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_mov_0 = s2_req_0_uop_is_mov; // @[dcache.scala:438:7, :670:25]
reg [3:0] s2_req_0_uop_ftq_idx; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_ftq_idx_0 = s2_req_0_uop_ftq_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_ftq_idx_0 = s2_req_0_uop_ftq_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_ftq_idx_0 = s2_req_0_uop_ftq_idx; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_edge_inst; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_edge_inst_0 = s2_req_0_uop_edge_inst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_edge_inst_0 = s2_req_0_uop_edge_inst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_edge_inst_0 = s2_req_0_uop_edge_inst; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_pc_lob; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_pc_lob_0 = s2_req_0_uop_pc_lob; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_pc_lob_0 = s2_req_0_uop_pc_lob; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_pc_lob_0 = s2_req_0_uop_pc_lob; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_taken; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_taken_0 = s2_req_0_uop_taken; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_taken_0 = s2_req_0_uop_taken; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_taken_0 = s2_req_0_uop_taken; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_imm_rename; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_imm_rename_0 = s2_req_0_uop_imm_rename; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_imm_rename_0 = s2_req_0_uop_imm_rename; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_imm_rename_0 = s2_req_0_uop_imm_rename; // @[dcache.scala:438:7, :670:25]
reg [2:0] s2_req_0_uop_imm_sel; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_imm_sel_0 = s2_req_0_uop_imm_sel; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_imm_sel_0 = s2_req_0_uop_imm_sel; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_imm_sel_0 = s2_req_0_uop_imm_sel; // @[dcache.scala:438:7, :670:25]
reg [4:0] s2_req_0_uop_pimm; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_pimm_0 = s2_req_0_uop_pimm; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_pimm_0 = s2_req_0_uop_pimm; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_pimm_0 = s2_req_0_uop_pimm; // @[dcache.scala:438:7, :670:25]
reg [19:0] s2_req_0_uop_imm_packed; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_imm_packed_0 = s2_req_0_uop_imm_packed; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_imm_packed_0 = s2_req_0_uop_imm_packed; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_imm_packed_0 = s2_req_0_uop_imm_packed; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_op1_sel; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_op1_sel_0 = s2_req_0_uop_op1_sel; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_op1_sel_0 = s2_req_0_uop_op1_sel; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_op1_sel_0 = s2_req_0_uop_op1_sel; // @[dcache.scala:438:7, :670:25]
reg [2:0] s2_req_0_uop_op2_sel; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_op2_sel_0 = s2_req_0_uop_op2_sel; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_op2_sel_0 = s2_req_0_uop_op2_sel; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_op2_sel_0 = s2_req_0_uop_op2_sel; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_ldst; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_ldst_0 = s2_req_0_uop_fp_ctrl_ldst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_ldst_0 = s2_req_0_uop_fp_ctrl_ldst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_ldst_0 = s2_req_0_uop_fp_ctrl_ldst; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_wen; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_wen_0 = s2_req_0_uop_fp_ctrl_wen; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_wen_0 = s2_req_0_uop_fp_ctrl_wen; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_wen_0 = s2_req_0_uop_fp_ctrl_wen; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_ren1; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_ren1_0 = s2_req_0_uop_fp_ctrl_ren1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_ren1_0 = s2_req_0_uop_fp_ctrl_ren1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_ren1_0 = s2_req_0_uop_fp_ctrl_ren1; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_ren2; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_ren2_0 = s2_req_0_uop_fp_ctrl_ren2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_ren2_0 = s2_req_0_uop_fp_ctrl_ren2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_ren2_0 = s2_req_0_uop_fp_ctrl_ren2; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_ren3; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_ren3_0 = s2_req_0_uop_fp_ctrl_ren3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_ren3_0 = s2_req_0_uop_fp_ctrl_ren3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_ren3_0 = s2_req_0_uop_fp_ctrl_ren3; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_swap12; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_swap12_0 = s2_req_0_uop_fp_ctrl_swap12; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_swap12_0 = s2_req_0_uop_fp_ctrl_swap12; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_swap12_0 = s2_req_0_uop_fp_ctrl_swap12; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_swap23; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_swap23_0 = s2_req_0_uop_fp_ctrl_swap23; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_swap23_0 = s2_req_0_uop_fp_ctrl_swap23; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_swap23_0 = s2_req_0_uop_fp_ctrl_swap23; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_fp_ctrl_typeTagIn; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_typeTagIn_0 = s2_req_0_uop_fp_ctrl_typeTagIn; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_typeTagIn_0 = s2_req_0_uop_fp_ctrl_typeTagIn; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_typeTagIn_0 = s2_req_0_uop_fp_ctrl_typeTagIn; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_fp_ctrl_typeTagOut; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_typeTagOut_0 = s2_req_0_uop_fp_ctrl_typeTagOut; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_typeTagOut_0 = s2_req_0_uop_fp_ctrl_typeTagOut; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_typeTagOut_0 = s2_req_0_uop_fp_ctrl_typeTagOut; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_fromint; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_fromint_0 = s2_req_0_uop_fp_ctrl_fromint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_fromint_0 = s2_req_0_uop_fp_ctrl_fromint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_fromint_0 = s2_req_0_uop_fp_ctrl_fromint; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_toint; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_toint_0 = s2_req_0_uop_fp_ctrl_toint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_toint_0 = s2_req_0_uop_fp_ctrl_toint; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_toint_0 = s2_req_0_uop_fp_ctrl_toint; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_fastpipe; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_fastpipe_0 = s2_req_0_uop_fp_ctrl_fastpipe; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_fastpipe_0 = s2_req_0_uop_fp_ctrl_fastpipe; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_fastpipe_0 = s2_req_0_uop_fp_ctrl_fastpipe; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_fma; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_fma_0 = s2_req_0_uop_fp_ctrl_fma; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_fma_0 = s2_req_0_uop_fp_ctrl_fma; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_fma_0 = s2_req_0_uop_fp_ctrl_fma; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_div; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_div_0 = s2_req_0_uop_fp_ctrl_div; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_div_0 = s2_req_0_uop_fp_ctrl_div; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_div_0 = s2_req_0_uop_fp_ctrl_div; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_sqrt; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_sqrt_0 = s2_req_0_uop_fp_ctrl_sqrt; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_sqrt_0 = s2_req_0_uop_fp_ctrl_sqrt; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_sqrt_0 = s2_req_0_uop_fp_ctrl_sqrt; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_wflags; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_wflags_0 = s2_req_0_uop_fp_ctrl_wflags; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_wflags_0 = s2_req_0_uop_fp_ctrl_wflags; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_wflags_0 = s2_req_0_uop_fp_ctrl_wflags; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_ctrl_vec; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_ctrl_vec_0 = s2_req_0_uop_fp_ctrl_vec; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_ctrl_vec_0 = s2_req_0_uop_fp_ctrl_vec; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_ctrl_vec_0 = s2_req_0_uop_fp_ctrl_vec; // @[dcache.scala:438:7, :670:25]
reg [4:0] s2_req_0_uop_rob_idx; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_rob_idx_0 = s2_req_0_uop_rob_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_rob_idx_0 = s2_req_0_uop_rob_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_rob_idx_0 = s2_req_0_uop_rob_idx; // @[dcache.scala:438:7, :670:25]
reg [3:0] s2_req_0_uop_ldq_idx; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_ldq_idx_0 = s2_req_0_uop_ldq_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_ldq_idx_0 = s2_req_0_uop_ldq_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_ldq_idx_0 = s2_req_0_uop_ldq_idx; // @[dcache.scala:438:7, :670:25]
reg [3:0] s2_req_0_uop_stq_idx; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_stq_idx_0 = s2_req_0_uop_stq_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_stq_idx_0 = s2_req_0_uop_stq_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_stq_idx_0 = s2_req_0_uop_stq_idx; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_rxq_idx; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_rxq_idx_0 = s2_req_0_uop_rxq_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_rxq_idx_0 = s2_req_0_uop_rxq_idx; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_rxq_idx_0 = s2_req_0_uop_rxq_idx; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_pdst; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_pdst_0 = s2_req_0_uop_pdst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_pdst_0 = s2_req_0_uop_pdst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_pdst_0 = s2_req_0_uop_pdst; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_prs1; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_prs1_0 = s2_req_0_uop_prs1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_prs1_0 = s2_req_0_uop_prs1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_prs1_0 = s2_req_0_uop_prs1; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_prs2; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_prs2_0 = s2_req_0_uop_prs2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_prs2_0 = s2_req_0_uop_prs2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_prs2_0 = s2_req_0_uop_prs2; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_prs3; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_prs3_0 = s2_req_0_uop_prs3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_prs3_0 = s2_req_0_uop_prs3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_prs3_0 = s2_req_0_uop_prs3; // @[dcache.scala:438:7, :670:25]
reg [3:0] s2_req_0_uop_ppred; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_ppred_0 = s2_req_0_uop_ppred; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_ppred_0 = s2_req_0_uop_ppred; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_ppred_0 = s2_req_0_uop_ppred; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_prs1_busy; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_prs1_busy_0 = s2_req_0_uop_prs1_busy; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_prs1_busy_0 = s2_req_0_uop_prs1_busy; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_prs1_busy_0 = s2_req_0_uop_prs1_busy; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_prs2_busy; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_prs2_busy_0 = s2_req_0_uop_prs2_busy; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_prs2_busy_0 = s2_req_0_uop_prs2_busy; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_prs2_busy_0 = s2_req_0_uop_prs2_busy; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_prs3_busy; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_prs3_busy_0 = s2_req_0_uop_prs3_busy; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_prs3_busy_0 = s2_req_0_uop_prs3_busy; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_prs3_busy_0 = s2_req_0_uop_prs3_busy; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_ppred_busy; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_ppred_busy_0 = s2_req_0_uop_ppred_busy; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_ppred_busy_0 = s2_req_0_uop_ppred_busy; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_ppred_busy_0 = s2_req_0_uop_ppred_busy; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_stale_pdst; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_stale_pdst_0 = s2_req_0_uop_stale_pdst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_stale_pdst_0 = s2_req_0_uop_stale_pdst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_stale_pdst_0 = s2_req_0_uop_stale_pdst; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_exception; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_exception_0 = s2_req_0_uop_exception; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_exception_0 = s2_req_0_uop_exception; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_exception_0 = s2_req_0_uop_exception; // @[dcache.scala:438:7, :670:25]
reg [63:0] s2_req_0_uop_exc_cause; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_exc_cause_0 = s2_req_0_uop_exc_cause; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_exc_cause_0 = s2_req_0_uop_exc_cause; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_exc_cause_0 = s2_req_0_uop_exc_cause; // @[dcache.scala:438:7, :670:25]
reg [4:0] s2_req_0_uop_mem_cmd; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_mem_cmd_0 = s2_req_0_uop_mem_cmd; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_mem_cmd_0 = s2_req_0_uop_mem_cmd; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_mem_cmd_0 = s2_req_0_uop_mem_cmd; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_mem_size; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_mem_size_0 = s2_req_0_uop_mem_size; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_mem_size_0 = s2_req_0_uop_mem_size; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_mem_size_0 = s2_req_0_uop_mem_size; // @[dcache.scala:438:7, :670:25]
wire [1:0] size = s2_req_0_uop_mem_size; // @[AMOALU.scala:11:18]
reg s2_req_0_uop_mem_signed; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_mem_signed_0 = s2_req_0_uop_mem_signed; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_mem_signed_0 = s2_req_0_uop_mem_signed; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_mem_signed_0 = s2_req_0_uop_mem_signed; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_uses_ldq; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_uses_ldq_0 = s2_req_0_uop_uses_ldq; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_uses_ldq_0 = s2_req_0_uop_uses_ldq; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_uses_ldq_0 = s2_req_0_uop_uses_ldq; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_uses_stq; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_uses_stq_0 = s2_req_0_uop_uses_stq; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_uses_stq_0 = s2_req_0_uop_uses_stq; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_uses_stq_0 = s2_req_0_uop_uses_stq; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_is_unique; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_is_unique_0 = s2_req_0_uop_is_unique; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_is_unique_0 = s2_req_0_uop_is_unique; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_is_unique_0 = s2_req_0_uop_is_unique; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_flush_on_commit; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_flush_on_commit_0 = s2_req_0_uop_flush_on_commit; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_flush_on_commit_0 = s2_req_0_uop_flush_on_commit; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_flush_on_commit_0 = s2_req_0_uop_flush_on_commit; // @[dcache.scala:438:7, :670:25]
reg [2:0] s2_req_0_uop_csr_cmd; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_csr_cmd_0 = s2_req_0_uop_csr_cmd; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_csr_cmd_0 = s2_req_0_uop_csr_cmd; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_csr_cmd_0 = s2_req_0_uop_csr_cmd; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_ldst_is_rs1; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_ldst_is_rs1_0 = s2_req_0_uop_ldst_is_rs1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_ldst_is_rs1_0 = s2_req_0_uop_ldst_is_rs1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_ldst_is_rs1_0 = s2_req_0_uop_ldst_is_rs1; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_ldst; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_ldst_0 = s2_req_0_uop_ldst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_ldst_0 = s2_req_0_uop_ldst; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_ldst_0 = s2_req_0_uop_ldst; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_lrs1; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_lrs1_0 = s2_req_0_uop_lrs1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_lrs1_0 = s2_req_0_uop_lrs1; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_lrs1_0 = s2_req_0_uop_lrs1; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_lrs2; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_lrs2_0 = s2_req_0_uop_lrs2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_lrs2_0 = s2_req_0_uop_lrs2; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_lrs2_0 = s2_req_0_uop_lrs2; // @[dcache.scala:438:7, :670:25]
reg [5:0] s2_req_0_uop_lrs3; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_lrs3_0 = s2_req_0_uop_lrs3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_lrs3_0 = s2_req_0_uop_lrs3; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_lrs3_0 = s2_req_0_uop_lrs3; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_dst_rtype; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_dst_rtype_0 = s2_req_0_uop_dst_rtype; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_dst_rtype_0 = s2_req_0_uop_dst_rtype; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_dst_rtype_0 = s2_req_0_uop_dst_rtype; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_lrs1_rtype; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_lrs1_rtype_0 = s2_req_0_uop_lrs1_rtype; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_lrs1_rtype_0 = s2_req_0_uop_lrs1_rtype; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_lrs1_rtype_0 = s2_req_0_uop_lrs1_rtype; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_lrs2_rtype; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_lrs2_rtype_0 = s2_req_0_uop_lrs2_rtype; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_lrs2_rtype_0 = s2_req_0_uop_lrs2_rtype; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_lrs2_rtype_0 = s2_req_0_uop_lrs2_rtype; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_frs3_en; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_frs3_en_0 = s2_req_0_uop_frs3_en; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_frs3_en_0 = s2_req_0_uop_frs3_en; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_frs3_en_0 = s2_req_0_uop_frs3_en; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fcn_dw; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fcn_dw_0 = s2_req_0_uop_fcn_dw; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fcn_dw_0 = s2_req_0_uop_fcn_dw; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fcn_dw_0 = s2_req_0_uop_fcn_dw; // @[dcache.scala:438:7, :670:25]
reg [4:0] s2_req_0_uop_fcn_op; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fcn_op_0 = s2_req_0_uop_fcn_op; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fcn_op_0 = s2_req_0_uop_fcn_op; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fcn_op_0 = s2_req_0_uop_fcn_op; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_fp_val; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_val_0 = s2_req_0_uop_fp_val; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_val_0 = s2_req_0_uop_fp_val; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_val_0 = s2_req_0_uop_fp_val; // @[dcache.scala:438:7, :670:25]
reg [2:0] s2_req_0_uop_fp_rm; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_rm_0 = s2_req_0_uop_fp_rm; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_rm_0 = s2_req_0_uop_fp_rm; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_rm_0 = s2_req_0_uop_fp_rm; // @[dcache.scala:438:7, :670:25]
reg [1:0] s2_req_0_uop_fp_typ; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_fp_typ_0 = s2_req_0_uop_fp_typ; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_fp_typ_0 = s2_req_0_uop_fp_typ; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_fp_typ_0 = s2_req_0_uop_fp_typ; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_xcpt_pf_if; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_xcpt_pf_if_0 = s2_req_0_uop_xcpt_pf_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_xcpt_pf_if_0 = s2_req_0_uop_xcpt_pf_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_xcpt_pf_if_0 = s2_req_0_uop_xcpt_pf_if; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_xcpt_ae_if; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_xcpt_ae_if_0 = s2_req_0_uop_xcpt_ae_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_xcpt_ae_if_0 = s2_req_0_uop_xcpt_ae_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_xcpt_ae_if_0 = s2_req_0_uop_xcpt_ae_if; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_xcpt_ma_if; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_xcpt_ma_if_0 = s2_req_0_uop_xcpt_ma_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_xcpt_ma_if_0 = s2_req_0_uop_xcpt_ma_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_xcpt_ma_if_0 = s2_req_0_uop_xcpt_ma_if; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_bp_debug_if; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_bp_debug_if_0 = s2_req_0_uop_bp_debug_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_bp_debug_if_0 = s2_req_0_uop_bp_debug_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_bp_debug_if_0 = s2_req_0_uop_bp_debug_if; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_uop_bp_xcpt_if; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_bp_xcpt_if_0 = s2_req_0_uop_bp_xcpt_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_bp_xcpt_if_0 = s2_req_0_uop_bp_xcpt_if; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_bp_xcpt_if_0 = s2_req_0_uop_bp_xcpt_if; // @[dcache.scala:438:7, :670:25]
reg [2:0] s2_req_0_uop_debug_fsrc; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_debug_fsrc_0 = s2_req_0_uop_debug_fsrc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_debug_fsrc_0 = s2_req_0_uop_debug_fsrc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_debug_fsrc_0 = s2_req_0_uop_debug_fsrc; // @[dcache.scala:438:7, :670:25]
reg [2:0] s2_req_0_uop_debug_tsrc; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_uop_debug_tsrc_0 = s2_req_0_uop_debug_tsrc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_uop_debug_tsrc_0 = s2_req_0_uop_debug_tsrc; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_uop_debug_tsrc_0 = s2_req_0_uop_debug_tsrc; // @[dcache.scala:438:7, :670:25]
reg [33:0] s2_req_0_addr; // @[dcache.scala:670:25]
assign io_lsu_store_ack_0_bits_addr_0 = s2_req_0_addr; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_addr_0 = s2_req_0_addr; // @[dcache.scala:438:7, :670:25]
reg [63:0] s2_req_0_data; // @[dcache.scala:670:25]
assign io_lsu_store_ack_0_bits_data_0 = s2_req_0_data; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_data_0 = s2_req_0_data; // @[dcache.scala:438:7, :670:25]
reg s2_req_0_is_hella; // @[dcache.scala:670:25]
assign io_lsu_resp_0_bits_is_hella_0 = s2_req_0_is_hella; // @[dcache.scala:438:7, :670:25]
assign io_lsu_store_ack_0_bits_is_hella_0 = s2_req_0_is_hella; // @[dcache.scala:438:7, :670:25]
assign io_lsu_nack_0_bits_is_hella_0 = s2_req_0_is_hella; // @[dcache.scala:438:7, :670:25]
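  // Stage-2 control: s2_type latches the request type; s2_valid drops requests killed in s1
  // and, while s2_store_failed is set, store-queue requests (s1_type == 4); the registered
  // s2_tag_match_way one-hot selects which way's coherence state becomes s2_hit_state.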
reg [2:0] s2_type; // @[dcache.scala:671:25]
wire _s2_valid_T = ~io_lsu_s1_kill_0_0; // @[dcache.scala:438:7, :674:26]
wire _s2_valid_T_1 = s1_valid_0 & _s2_valid_T; // @[dcache.scala:454:49, :673:39, :674:26]
wire _s2_valid_T_6 = _s2_valid_T_1; // @[dcache.scala:673:39, :674:45]
wire _s2_valid_T_9 = _s2_valid_T_6; // @[dcache.scala:674:45, :675:85]
wire _s2_valid_T_10 = s1_type == 3'h4; // @[dcache.scala:649:32, :677:56]
wire _s2_valid_T_11 = s2_store_failed & _s2_valid_T_10; // @[dcache.scala:636:29, :677:{44,56}]
wire _s2_valid_T_12 = _s2_valid_T_11 & s1_req_0_uop_uses_stq; // @[dcache.scala:633:32, :677:{44,67}]
wire _s2_valid_T_13 = ~_s2_valid_T_12; // @[dcache.scala:677:{26,67}]
wire _s2_valid_T_14 = _s2_valid_T_9 & _s2_valid_T_13; // @[dcache.scala:675:85, :676:72, :677:26]
reg s2_valid_REG; // @[dcache.scala:673:26]
wire s2_valid_0 = s2_valid_REG; // @[dcache.scala:454:49, :673:26]
reg [1:0] s2_tag_match_way_0; // @[dcache.scala:681:33]
wire s2_tag_match_0 = |s2_tag_match_way_0; // @[dcache.scala:681:33, :682:49]
reg [1:0] s2_hit_state_REG_state; // @[dcache.scala:683:93]
wire [1:0] _s2_hit_state_WIRE_0_state = s2_hit_state_REG_state; // @[dcache.scala:656:47, :683:93]
reg [1:0] s2_hit_state_REG_1_state; // @[dcache.scala:683:93]
wire [1:0] _s2_hit_state_WIRE_1_state_0 = s2_hit_state_REG_1_state; // @[dcache.scala:656:47, :683:93]
wire _s2_hit_state_T = s2_tag_match_way_0[0]; // @[Mux.scala:32:36]
wire _s2_data_muxed_T = s2_tag_match_way_0[0]; // @[Mux.scala:32:36]
wire _mshrs_io_meta_resp_bits_T = s2_tag_match_way_0[0]; // @[Mux.scala:32:36]
wire _s2_hit_state_T_1 = s2_tag_match_way_0[1]; // @[Mux.scala:32:36]
wire _s2_data_muxed_T_1 = s2_tag_match_way_0[1]; // @[Mux.scala:32:36]
wire _mshrs_io_meta_resp_bits_T_1 = s2_tag_match_way_0[1]; // @[Mux.scala:32:36]
wire [1:0] _s2_hit_state_WIRE_2; // @[Mux.scala:30:73]
wire [1:0] s2_hit_state_0_state = _s2_hit_state_WIRE_1_state; // @[Mux.scala:30:73]
wire [1:0] _s2_hit_state_T_2 = _s2_hit_state_T ? _s2_hit_state_WIRE_0_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_hit_state_T_3 = _s2_hit_state_T_1 ? _s2_hit_state_WIRE_1_state_0 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_hit_state_T_4 = _s2_hit_state_T_2 | _s2_hit_state_T_3; // @[Mux.scala:30:73]
assign _s2_hit_state_WIRE_2 = _s2_hit_state_T_4; // @[Mux.scala:30:73]
assign _s2_hit_state_WIRE_1_state = _s2_hit_state_WIRE_2; // @[Mux.scala:30:73]
wire [1:0] mshrs_io_req_0_bits_old_meta_meta_coh_state = s2_hit_state_0_state; // @[HellaCache.scala:305:20]
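  // Shared decode of s2_req_0_uop_mem_cmd: each _GEN_* wire below compares the command against
  // one encoding and is reused by the permission-check, new-hit-state, send-resp, store-ack,
  // MSHR-request, and s3_valid terms (see the Consts.scala / package.scala locators).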
wire _GEN_2 = s2_req_0_uop_mem_cmd == 5'h1; // @[Consts.scala:90:32]
wire _s2_has_permission_r_c_cat_T; // @[Consts.scala:90:32]
assign _s2_has_permission_r_c_cat_T = _GEN_2; // @[Consts.scala:90:32]
wire _s2_has_permission_r_c_cat_T_23; // @[Consts.scala:90:32]
assign _s2_has_permission_r_c_cat_T_23 = _GEN_2; // @[Consts.scala:90:32]
wire _s2_new_hit_state_r_c_cat_T; // @[Consts.scala:90:32]
assign _s2_new_hit_state_r_c_cat_T = _GEN_2; // @[Consts.scala:90:32]
wire _s2_new_hit_state_r_c_cat_T_23; // @[Consts.scala:90:32]
assign _s2_new_hit_state_r_c_cat_T_23 = _GEN_2; // @[Consts.scala:90:32]
wire _s2_send_store_ack_T_2; // @[Consts.scala:90:32]
assign _s2_send_store_ack_T_2 = _GEN_2; // @[Consts.scala:90:32]
wire _mshrs_io_req_0_valid_T_46; // @[Consts.scala:90:32]
assign _mshrs_io_req_0_valid_T_46 = _GEN_2; // @[Consts.scala:90:32]
wire _s3_valid_T_1; // @[Consts.scala:90:32]
assign _s3_valid_T_1 = _GEN_2; // @[Consts.scala:90:32]
wire _GEN_3 = s2_req_0_uop_mem_cmd == 5'h11; // @[Consts.scala:90:49]
wire _s2_has_permission_r_c_cat_T_1; // @[Consts.scala:90:49]
assign _s2_has_permission_r_c_cat_T_1 = _GEN_3; // @[Consts.scala:90:49]
wire _s2_has_permission_r_c_cat_T_24; // @[Consts.scala:90:49]
assign _s2_has_permission_r_c_cat_T_24 = _GEN_3; // @[Consts.scala:90:49]
wire _s2_new_hit_state_r_c_cat_T_1; // @[Consts.scala:90:49]
assign _s2_new_hit_state_r_c_cat_T_1 = _GEN_3; // @[Consts.scala:90:49]
wire _s2_new_hit_state_r_c_cat_T_24; // @[Consts.scala:90:49]
assign _s2_new_hit_state_r_c_cat_T_24 = _GEN_3; // @[Consts.scala:90:49]
wire _s2_send_store_ack_T_3; // @[Consts.scala:90:49]
assign _s2_send_store_ack_T_3 = _GEN_3; // @[Consts.scala:90:49]
wire _mshrs_io_req_0_valid_T_47; // @[Consts.scala:90:49]
assign _mshrs_io_req_0_valid_T_47 = _GEN_3; // @[Consts.scala:90:49]
wire _s3_valid_T_2; // @[Consts.scala:90:49]
assign _s3_valid_T_2 = _GEN_3; // @[Consts.scala:90:49]
wire _s2_has_permission_r_c_cat_T_2 = _s2_has_permission_r_c_cat_T | _s2_has_permission_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _GEN_4 = s2_req_0_uop_mem_cmd == 5'h7; // @[Consts.scala:90:66]
wire _s2_has_permission_r_c_cat_T_3; // @[Consts.scala:90:66]
assign _s2_has_permission_r_c_cat_T_3 = _GEN_4; // @[Consts.scala:90:66]
wire _s2_has_permission_r_c_cat_T_26; // @[Consts.scala:90:66]
assign _s2_has_permission_r_c_cat_T_26 = _GEN_4; // @[Consts.scala:90:66]
wire _s2_new_hit_state_r_c_cat_T_3; // @[Consts.scala:90:66]
assign _s2_new_hit_state_r_c_cat_T_3 = _GEN_4; // @[Consts.scala:90:66]
wire _s2_new_hit_state_r_c_cat_T_26; // @[Consts.scala:90:66]
assign _s2_new_hit_state_r_c_cat_T_26 = _GEN_4; // @[Consts.scala:90:66]
wire _s2_sc_T; // @[dcache.scala:702:37]
assign _s2_sc_T = _GEN_4; // @[Consts.scala:90:66]
wire _s2_send_resp_T_10; // @[package.scala:16:47]
assign _s2_send_resp_T_10 = _GEN_4; // @[package.scala:16:47]
wire _s2_send_store_ack_T_5; // @[Consts.scala:90:66]
assign _s2_send_store_ack_T_5 = _GEN_4; // @[Consts.scala:90:66]
wire _mshrs_io_req_0_valid_T_23; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_23 = _GEN_4; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_49; // @[Consts.scala:90:66]
assign _mshrs_io_req_0_valid_T_49 = _GEN_4; // @[Consts.scala:90:66]
wire _s3_valid_T_4; // @[Consts.scala:90:66]
assign _s3_valid_T_4 = _GEN_4; // @[Consts.scala:90:66]
wire _s2_has_permission_r_c_cat_T_4 = _s2_has_permission_r_c_cat_T_2 | _s2_has_permission_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _GEN_5 = s2_req_0_uop_mem_cmd == 5'h4; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_5; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_5 = _GEN_5; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_28; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_28 = _GEN_5; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_5; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_5 = _GEN_5; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_28; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_28 = _GEN_5; // @[package.scala:16:47]
wire _s2_send_resp_T_14; // @[package.scala:16:47]
assign _s2_send_resp_T_14 = _GEN_5; // @[package.scala:16:47]
wire _s2_send_store_ack_T_7; // @[package.scala:16:47]
assign _s2_send_store_ack_T_7 = _GEN_5; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_27; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_27 = _GEN_5; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_51; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_51 = _GEN_5; // @[package.scala:16:47]
wire _s3_valid_T_6; // @[package.scala:16:47]
assign _s3_valid_T_6 = _GEN_5; // @[package.scala:16:47]
wire _GEN_6 = s2_req_0_uop_mem_cmd == 5'h9; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_6; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_6 = _GEN_6; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_29; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_29 = _GEN_6; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_6; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_6 = _GEN_6; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_29; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_29 = _GEN_6; // @[package.scala:16:47]
wire _s2_send_resp_T_15; // @[package.scala:16:47]
assign _s2_send_resp_T_15 = _GEN_6; // @[package.scala:16:47]
wire _s2_send_store_ack_T_8; // @[package.scala:16:47]
assign _s2_send_store_ack_T_8 = _GEN_6; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_28; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_28 = _GEN_6; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_52; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_52 = _GEN_6; // @[package.scala:16:47]
wire _s3_valid_T_7; // @[package.scala:16:47]
assign _s3_valid_T_7 = _GEN_6; // @[package.scala:16:47]
wire _GEN_7 = s2_req_0_uop_mem_cmd == 5'hA; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_7; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_7 = _GEN_7; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_30; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_30 = _GEN_7; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_7; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_7 = _GEN_7; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_30; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_30 = _GEN_7; // @[package.scala:16:47]
wire _s2_send_resp_T_16; // @[package.scala:16:47]
assign _s2_send_resp_T_16 = _GEN_7; // @[package.scala:16:47]
wire _s2_send_store_ack_T_9; // @[package.scala:16:47]
assign _s2_send_store_ack_T_9 = _GEN_7; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_29; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_29 = _GEN_7; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_53; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_53 = _GEN_7; // @[package.scala:16:47]
wire _s3_valid_T_8; // @[package.scala:16:47]
assign _s3_valid_T_8 = _GEN_7; // @[package.scala:16:47]
wire _GEN_8 = s2_req_0_uop_mem_cmd == 5'hB; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_8; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_8 = _GEN_8; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_31; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_31 = _GEN_8; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_8; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_8 = _GEN_8; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_31; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_31 = _GEN_8; // @[package.scala:16:47]
wire _s2_send_resp_T_17; // @[package.scala:16:47]
assign _s2_send_resp_T_17 = _GEN_8; // @[package.scala:16:47]
wire _s2_send_store_ack_T_10; // @[package.scala:16:47]
assign _s2_send_store_ack_T_10 = _GEN_8; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_30; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_30 = _GEN_8; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_54; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_54 = _GEN_8; // @[package.scala:16:47]
wire _s3_valid_T_9; // @[package.scala:16:47]
assign _s3_valid_T_9 = _GEN_8; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_9 = _s2_has_permission_r_c_cat_T_5 | _s2_has_permission_r_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_10 = _s2_has_permission_r_c_cat_T_9 | _s2_has_permission_r_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_11 = _s2_has_permission_r_c_cat_T_10 | _s2_has_permission_r_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _GEN_9 = s2_req_0_uop_mem_cmd == 5'h8; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_12; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_12 = _GEN_9; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_35; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_35 = _GEN_9; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_12; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_12 = _GEN_9; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_35; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_35 = _GEN_9; // @[package.scala:16:47]
wire _s2_send_resp_T_21; // @[package.scala:16:47]
assign _s2_send_resp_T_21 = _GEN_9; // @[package.scala:16:47]
wire _s2_send_store_ack_T_14; // @[package.scala:16:47]
assign _s2_send_store_ack_T_14 = _GEN_9; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_34; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_34 = _GEN_9; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_58; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_58 = _GEN_9; // @[package.scala:16:47]
wire _s3_valid_T_13; // @[package.scala:16:47]
assign _s3_valid_T_13 = _GEN_9; // @[package.scala:16:47]
wire _GEN_10 = s2_req_0_uop_mem_cmd == 5'hC; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_13; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_13 = _GEN_10; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_36; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_36 = _GEN_10; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_13; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_13 = _GEN_10; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_36; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_36 = _GEN_10; // @[package.scala:16:47]
wire _s2_send_resp_T_22; // @[package.scala:16:47]
assign _s2_send_resp_T_22 = _GEN_10; // @[package.scala:16:47]
wire _s2_send_store_ack_T_15; // @[package.scala:16:47]
assign _s2_send_store_ack_T_15 = _GEN_10; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_35; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_35 = _GEN_10; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_59; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_59 = _GEN_10; // @[package.scala:16:47]
wire _s3_valid_T_14; // @[package.scala:16:47]
assign _s3_valid_T_14 = _GEN_10; // @[package.scala:16:47]
wire _GEN_11 = s2_req_0_uop_mem_cmd == 5'hD; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_14; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_14 = _GEN_11; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_37; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_37 = _GEN_11; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_14; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_14 = _GEN_11; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_37; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_37 = _GEN_11; // @[package.scala:16:47]
wire _s2_send_resp_T_23; // @[package.scala:16:47]
assign _s2_send_resp_T_23 = _GEN_11; // @[package.scala:16:47]
wire _s2_send_store_ack_T_16; // @[package.scala:16:47]
assign _s2_send_store_ack_T_16 = _GEN_11; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_36; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_36 = _GEN_11; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_60; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_60 = _GEN_11; // @[package.scala:16:47]
wire _s3_valid_T_15; // @[package.scala:16:47]
assign _s3_valid_T_15 = _GEN_11; // @[package.scala:16:47]
wire _GEN_12 = s2_req_0_uop_mem_cmd == 5'hE; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_15; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_15 = _GEN_12; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_38; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_38 = _GEN_12; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_15; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_15 = _GEN_12; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_38; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_38 = _GEN_12; // @[package.scala:16:47]
wire _s2_send_resp_T_24; // @[package.scala:16:47]
assign _s2_send_resp_T_24 = _GEN_12; // @[package.scala:16:47]
wire _s2_send_store_ack_T_17; // @[package.scala:16:47]
assign _s2_send_store_ack_T_17 = _GEN_12; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_37; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_37 = _GEN_12; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_61; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_61 = _GEN_12; // @[package.scala:16:47]
wire _s3_valid_T_16; // @[package.scala:16:47]
assign _s3_valid_T_16 = _GEN_12; // @[package.scala:16:47]
wire _GEN_13 = s2_req_0_uop_mem_cmd == 5'hF; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_16; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_16 = _GEN_13; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_39; // @[package.scala:16:47]
assign _s2_has_permission_r_c_cat_T_39 = _GEN_13; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_16; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_16 = _GEN_13; // @[package.scala:16:47]
wire _s2_new_hit_state_r_c_cat_T_39; // @[package.scala:16:47]
assign _s2_new_hit_state_r_c_cat_T_39 = _GEN_13; // @[package.scala:16:47]
wire _s2_send_resp_T_25; // @[package.scala:16:47]
assign _s2_send_resp_T_25 = _GEN_13; // @[package.scala:16:47]
wire _s2_send_store_ack_T_18; // @[package.scala:16:47]
assign _s2_send_store_ack_T_18 = _GEN_13; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_38; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_38 = _GEN_13; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_62; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_62 = _GEN_13; // @[package.scala:16:47]
wire _s3_valid_T_17; // @[package.scala:16:47]
assign _s3_valid_T_17 = _GEN_13; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_17 = _s2_has_permission_r_c_cat_T_12 | _s2_has_permission_r_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_18 = _s2_has_permission_r_c_cat_T_17 | _s2_has_permission_r_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_19 = _s2_has_permission_r_c_cat_T_18 | _s2_has_permission_r_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_20 = _s2_has_permission_r_c_cat_T_19 | _s2_has_permission_r_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_21 = _s2_has_permission_r_c_cat_T_11 | _s2_has_permission_r_c_cat_T_20; // @[package.scala:81:59]
wire _s2_has_permission_r_c_cat_T_22 = _s2_has_permission_r_c_cat_T_4 | _s2_has_permission_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _s2_has_permission_r_c_cat_T_25 = _s2_has_permission_r_c_cat_T_23 | _s2_has_permission_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _s2_has_permission_r_c_cat_T_27 = _s2_has_permission_r_c_cat_T_25 | _s2_has_permission_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _s2_has_permission_r_c_cat_T_32 = _s2_has_permission_r_c_cat_T_28 | _s2_has_permission_r_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_33 = _s2_has_permission_r_c_cat_T_32 | _s2_has_permission_r_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_34 = _s2_has_permission_r_c_cat_T_33 | _s2_has_permission_r_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_40 = _s2_has_permission_r_c_cat_T_35 | _s2_has_permission_r_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_41 = _s2_has_permission_r_c_cat_T_40 | _s2_has_permission_r_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_42 = _s2_has_permission_r_c_cat_T_41 | _s2_has_permission_r_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_43 = _s2_has_permission_r_c_cat_T_42 | _s2_has_permission_r_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _s2_has_permission_r_c_cat_T_44 = _s2_has_permission_r_c_cat_T_34 | _s2_has_permission_r_c_cat_T_43; // @[package.scala:81:59]
wire _s2_has_permission_r_c_cat_T_45 = _s2_has_permission_r_c_cat_T_27 | _s2_has_permission_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _GEN_14 = s2_req_0_uop_mem_cmd == 5'h3; // @[Consts.scala:91:54]
wire _s2_has_permission_r_c_cat_T_46; // @[Consts.scala:91:54]
assign _s2_has_permission_r_c_cat_T_46 = _GEN_14; // @[Consts.scala:91:54]
wire _s2_new_hit_state_r_c_cat_T_46; // @[Consts.scala:91:54]
assign _s2_new_hit_state_r_c_cat_T_46 = _GEN_14; // @[Consts.scala:91:54]
wire _mshrs_io_req_0_valid_T_18; // @[Consts.scala:88:52]
assign _mshrs_io_req_0_valid_T_18 = _GEN_14; // @[Consts.scala:88:52, :91:54]
wire _s2_has_permission_r_c_cat_T_47 = _s2_has_permission_r_c_cat_T_45 | _s2_has_permission_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _GEN_15 = s2_req_0_uop_mem_cmd == 5'h6; // @[Consts.scala:91:71]
wire _s2_has_permission_r_c_cat_T_48; // @[Consts.scala:91:71]
assign _s2_has_permission_r_c_cat_T_48 = _GEN_15; // @[Consts.scala:91:71]
wire _s2_new_hit_state_r_c_cat_T_48; // @[Consts.scala:91:71]
assign _s2_new_hit_state_r_c_cat_T_48 = _GEN_15; // @[Consts.scala:91:71]
wire _s2_lr_T; // @[dcache.scala:701:37]
assign _s2_lr_T = _GEN_15; // @[Consts.scala:91:71]
wire _s2_send_resp_T_9; // @[package.scala:16:47]
assign _s2_send_resp_T_9 = _GEN_15; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_22; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_22 = _GEN_15; // @[package.scala:16:47]
wire _s2_has_permission_r_c_cat_T_49 = _s2_has_permission_r_c_cat_T_47 | _s2_has_permission_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
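  // Stage-2 permission check: concatenate the command's access-intent bits with the current line
  // coherence state and resolve {s2_has_permission_0, s2_has_permission_meta_state} through the
  // cascaded priority mux below.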
wire [1:0] s2_has_permission_r_c = {_s2_has_permission_r_c_cat_T_22, _s2_has_permission_r_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _s2_has_permission_r_T = {s2_has_permission_r_c, s2_hit_state_0_state}; // @[Metadata.scala:29:18, :58:19]
wire _s2_has_permission_r_T_25 = _s2_has_permission_r_T == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _s2_has_permission_r_T_27 = {1'h0, _s2_has_permission_r_T_25}; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_28 = _s2_has_permission_r_T == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _s2_has_permission_r_T_30 = _s2_has_permission_r_T_28 ? 2'h2 : _s2_has_permission_r_T_27; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_31 = _s2_has_permission_r_T == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _s2_has_permission_r_T_33 = _s2_has_permission_r_T_31 ? 2'h1 : _s2_has_permission_r_T_30; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_34 = _s2_has_permission_r_T == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _s2_has_permission_r_T_36 = _s2_has_permission_r_T_34 ? 2'h2 : _s2_has_permission_r_T_33; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_37 = _s2_has_permission_r_T == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _s2_has_permission_r_T_39 = _s2_has_permission_r_T_37 ? 2'h0 : _s2_has_permission_r_T_36; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_40 = _s2_has_permission_r_T == 4'hE; // @[Misc.scala:49:20]
wire _s2_has_permission_r_T_41 = _s2_has_permission_r_T_40; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_has_permission_r_T_42 = _s2_has_permission_r_T_40 ? 2'h3 : _s2_has_permission_r_T_39; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_43 = &_s2_has_permission_r_T; // @[Misc.scala:49:20]
wire _s2_has_permission_r_T_44 = _s2_has_permission_r_T_43 | _s2_has_permission_r_T_41; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_has_permission_r_T_45 = _s2_has_permission_r_T_43 ? 2'h3 : _s2_has_permission_r_T_42; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_46 = _s2_has_permission_r_T == 4'h6; // @[Misc.scala:49:20]
wire _s2_has_permission_r_T_47 = _s2_has_permission_r_T_46 | _s2_has_permission_r_T_44; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_has_permission_r_T_48 = _s2_has_permission_r_T_46 ? 2'h2 : _s2_has_permission_r_T_45; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_49 = _s2_has_permission_r_T == 4'h7; // @[Misc.scala:49:20]
wire _s2_has_permission_r_T_50 = _s2_has_permission_r_T_49 | _s2_has_permission_r_T_47; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_has_permission_r_T_51 = _s2_has_permission_r_T_49 ? 2'h3 : _s2_has_permission_r_T_48; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_52 = _s2_has_permission_r_T == 4'h1; // @[Misc.scala:49:20]
wire _s2_has_permission_r_T_53 = _s2_has_permission_r_T_52 | _s2_has_permission_r_T_50; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_has_permission_r_T_54 = _s2_has_permission_r_T_52 ? 2'h1 : _s2_has_permission_r_T_51; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_55 = _s2_has_permission_r_T == 4'h2; // @[Misc.scala:49:20]
wire _s2_has_permission_r_T_56 = _s2_has_permission_r_T_55 | _s2_has_permission_r_T_53; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_has_permission_r_T_57 = _s2_has_permission_r_T_55 ? 2'h2 : _s2_has_permission_r_T_54; // @[Misc.scala:35:36, :49:20]
wire _s2_has_permission_r_T_58 = _s2_has_permission_r_T == 4'h3; // @[Misc.scala:49:20]
wire s2_has_permission_r_1 = _s2_has_permission_r_T_58 | _s2_has_permission_r_T_56; // @[Misc.scala:35:9, :49:20]
wire s2_has_permission_0 = s2_has_permission_r_1; // @[Misc.scala:35:9]
wire [1:0] s2_has_permission_r_2 = _s2_has_permission_r_T_58 ? 2'h3 : _s2_has_permission_r_T_57; // @[Misc.scala:35:36, :49:20]
wire [1:0] s2_has_permission_meta_state = s2_has_permission_r_2; // @[Misc.scala:35:36]
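  // Stage-2 new hit state: the same command/coherence lookup, but here the selected value is the
  // metadata state the line would hold after a hit (s2_new_hit_state_0_state).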
wire _s2_new_hit_state_r_c_cat_T_2 = _s2_new_hit_state_r_c_cat_T | _s2_new_hit_state_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _s2_new_hit_state_r_c_cat_T_4 = _s2_new_hit_state_r_c_cat_T_2 | _s2_new_hit_state_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _s2_new_hit_state_r_c_cat_T_9 = _s2_new_hit_state_r_c_cat_T_5 | _s2_new_hit_state_r_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_10 = _s2_new_hit_state_r_c_cat_T_9 | _s2_new_hit_state_r_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_11 = _s2_new_hit_state_r_c_cat_T_10 | _s2_new_hit_state_r_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_17 = _s2_new_hit_state_r_c_cat_T_12 | _s2_new_hit_state_r_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_18 = _s2_new_hit_state_r_c_cat_T_17 | _s2_new_hit_state_r_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_19 = _s2_new_hit_state_r_c_cat_T_18 | _s2_new_hit_state_r_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_20 = _s2_new_hit_state_r_c_cat_T_19 | _s2_new_hit_state_r_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_21 = _s2_new_hit_state_r_c_cat_T_11 | _s2_new_hit_state_r_c_cat_T_20; // @[package.scala:81:59]
wire _s2_new_hit_state_r_c_cat_T_22 = _s2_new_hit_state_r_c_cat_T_4 | _s2_new_hit_state_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _s2_new_hit_state_r_c_cat_T_25 = _s2_new_hit_state_r_c_cat_T_23 | _s2_new_hit_state_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _s2_new_hit_state_r_c_cat_T_27 = _s2_new_hit_state_r_c_cat_T_25 | _s2_new_hit_state_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _s2_new_hit_state_r_c_cat_T_32 = _s2_new_hit_state_r_c_cat_T_28 | _s2_new_hit_state_r_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_33 = _s2_new_hit_state_r_c_cat_T_32 | _s2_new_hit_state_r_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_34 = _s2_new_hit_state_r_c_cat_T_33 | _s2_new_hit_state_r_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_40 = _s2_new_hit_state_r_c_cat_T_35 | _s2_new_hit_state_r_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_41 = _s2_new_hit_state_r_c_cat_T_40 | _s2_new_hit_state_r_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_42 = _s2_new_hit_state_r_c_cat_T_41 | _s2_new_hit_state_r_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_43 = _s2_new_hit_state_r_c_cat_T_42 | _s2_new_hit_state_r_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _s2_new_hit_state_r_c_cat_T_44 = _s2_new_hit_state_r_c_cat_T_34 | _s2_new_hit_state_r_c_cat_T_43; // @[package.scala:81:59]
wire _s2_new_hit_state_r_c_cat_T_45 = _s2_new_hit_state_r_c_cat_T_27 | _s2_new_hit_state_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _s2_new_hit_state_r_c_cat_T_47 = _s2_new_hit_state_r_c_cat_T_45 | _s2_new_hit_state_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _s2_new_hit_state_r_c_cat_T_49 = _s2_new_hit_state_r_c_cat_T_47 | _s2_new_hit_state_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
wire [1:0] s2_new_hit_state_r_c = {_s2_new_hit_state_r_c_cat_T_22, _s2_new_hit_state_r_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _s2_new_hit_state_r_T = {s2_new_hit_state_r_c, s2_hit_state_0_state}; // @[Metadata.scala:29:18, :58:19]
wire _s2_new_hit_state_r_T_25 = _s2_new_hit_state_r_T == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _s2_new_hit_state_r_T_27 = {1'h0, _s2_new_hit_state_r_T_25}; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_28 = _s2_new_hit_state_r_T == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _s2_new_hit_state_r_T_30 = _s2_new_hit_state_r_T_28 ? 2'h2 : _s2_new_hit_state_r_T_27; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_31 = _s2_new_hit_state_r_T == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _s2_new_hit_state_r_T_33 = _s2_new_hit_state_r_T_31 ? 2'h1 : _s2_new_hit_state_r_T_30; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_34 = _s2_new_hit_state_r_T == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _s2_new_hit_state_r_T_36 = _s2_new_hit_state_r_T_34 ? 2'h2 : _s2_new_hit_state_r_T_33; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_37 = _s2_new_hit_state_r_T == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _s2_new_hit_state_r_T_39 = _s2_new_hit_state_r_T_37 ? 2'h0 : _s2_new_hit_state_r_T_36; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_40 = _s2_new_hit_state_r_T == 4'hE; // @[Misc.scala:49:20]
wire _s2_new_hit_state_r_T_41 = _s2_new_hit_state_r_T_40; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_new_hit_state_r_T_42 = _s2_new_hit_state_r_T_40 ? 2'h3 : _s2_new_hit_state_r_T_39; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_43 = &_s2_new_hit_state_r_T; // @[Misc.scala:49:20]
wire _s2_new_hit_state_r_T_44 = _s2_new_hit_state_r_T_43 | _s2_new_hit_state_r_T_41; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_new_hit_state_r_T_45 = _s2_new_hit_state_r_T_43 ? 2'h3 : _s2_new_hit_state_r_T_42; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_46 = _s2_new_hit_state_r_T == 4'h6; // @[Misc.scala:49:20]
wire _s2_new_hit_state_r_T_47 = _s2_new_hit_state_r_T_46 | _s2_new_hit_state_r_T_44; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_new_hit_state_r_T_48 = _s2_new_hit_state_r_T_46 ? 2'h2 : _s2_new_hit_state_r_T_45; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_49 = _s2_new_hit_state_r_T == 4'h7; // @[Misc.scala:49:20]
wire _s2_new_hit_state_r_T_50 = _s2_new_hit_state_r_T_49 | _s2_new_hit_state_r_T_47; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_new_hit_state_r_T_51 = _s2_new_hit_state_r_T_49 ? 2'h3 : _s2_new_hit_state_r_T_48; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_52 = _s2_new_hit_state_r_T == 4'h1; // @[Misc.scala:49:20]
wire _s2_new_hit_state_r_T_53 = _s2_new_hit_state_r_T_52 | _s2_new_hit_state_r_T_50; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_new_hit_state_r_T_54 = _s2_new_hit_state_r_T_52 ? 2'h1 : _s2_new_hit_state_r_T_51; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_55 = _s2_new_hit_state_r_T == 4'h2; // @[Misc.scala:49:20]
wire _s2_new_hit_state_r_T_56 = _s2_new_hit_state_r_T_55 | _s2_new_hit_state_r_T_53; // @[Misc.scala:35:9, :49:20]
wire [1:0] _s2_new_hit_state_r_T_57 = _s2_new_hit_state_r_T_55 ? 2'h2 : _s2_new_hit_state_r_T_54; // @[Misc.scala:35:36, :49:20]
wire _s2_new_hit_state_r_T_58 = _s2_new_hit_state_r_T == 4'h3; // @[Misc.scala:49:20]
wire s2_new_hit_state_r_1 = _s2_new_hit_state_r_T_58 | _s2_new_hit_state_r_T_56; // @[Misc.scala:35:9, :49:20]
wire [1:0] s2_new_hit_state_r_2 = _s2_new_hit_state_r_T_58 ? 2'h3 : _s2_new_hit_state_r_T_57; // @[Misc.scala:35:36, :49:20]
wire [1:0] s2_new_hit_state_meta_state = s2_new_hit_state_r_2; // @[Misc.scala:35:36]
wire [1:0] s2_new_hit_state_0_state = s2_new_hit_state_meta_state; // @[Metadata.scala:160:20]
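  // A stage-2 hit requires a tag match with sufficient permission, no coherence-state change, and
  // no MSHR conflict; requests with s2_type 0 or 2 are counted as hits unconditionally.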
wire _s2_hit_T = s2_tag_match_0 & s2_has_permission_0; // @[dcache.scala:454:49, :682:49, :687:47]
wire _s2_hit_T_1 = s2_hit_state_0_state == s2_new_hit_state_0_state; // @[Metadata.scala:46:46]
wire _s2_hit_T_2 = _s2_hit_T & _s2_hit_T_1; // @[Metadata.scala:46:46]
wire _s2_hit_T_3 = ~_mshrs_io_block_hit_0; // @[dcache.scala:460:21, :687:117]
wire _s2_hit_T_4 = _s2_hit_T_2 & _s2_hit_T_3; // @[dcache.scala:687:{71,114,117}]
wire _T_75 = s2_type == 3'h0; // @[package.scala:16:47]
wire _s2_hit_T_5; // @[package.scala:16:47]
assign _s2_hit_T_5 = _T_75; // @[package.scala:16:47]
wire _s2_lr_T_2; // @[dcache.scala:701:83]
assign _s2_lr_T_2 = _T_75; // @[package.scala:16:47]
wire _s2_sc_T_2; // @[dcache.scala:702:83]
assign _s2_sc_T_2 = _T_75; // @[package.scala:16:47]
wire _s2_send_resp_T_3; // @[dcache.scala:774:77]
assign _s2_send_resp_T_3 = _T_75; // @[package.scala:16:47]
wire _s2_hit_T_6 = s2_type == 3'h2; // @[package.scala:16:47]
wire _s2_hit_T_7 = _s2_hit_T_5 | _s2_hit_T_6; // @[package.scala:16:47, :81:59]
wire _s2_hit_T_8 = _s2_hit_T_4 | _s2_hit_T_7; // @[package.scala:81:59]
wire s2_hit_0 = _s2_hit_T_8; // @[dcache.scala:454:49, :687:141]
wire s2_nack_0; // @[dcache.scala:688:21]
reg s2_wb_idx_matches_0; // @[dcache.scala:692:34]
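  // LR/SC bookkeeping: debug registers track SC failures, lrsc_count/lrsc_addr hold the active
  // reservation, and s2_sc_fail is raised when an SC does not match a valid reservation.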
reg [33:0] debug_sc_fail_addr; // @[dcache.scala:695:35]
reg [7:0] debug_sc_fail_cnt; // @[dcache.scala:696:35]
reg [6:0] lrsc_count; // @[dcache.scala:698:27]
wire lrsc_valid = |(lrsc_count[6:2]); // @[dcache.scala:698:27, :699:31]
reg [27:0] lrsc_addr; // @[dcache.scala:700:23]
reg s2_lr_REG; // @[dcache.scala:701:59]
wire _s2_lr_T_1 = ~s2_lr_REG; // @[dcache.scala:701:{51,59}]
wire _s2_lr_T_3 = _s2_lr_T_1 | _s2_lr_T_2; // @[dcache.scala:701:{51,72,83}]
wire s2_lr = _s2_lr_T & _s2_lr_T_3; // @[dcache.scala:701:{37,47,72}]
reg s2_sc_REG; // @[dcache.scala:702:59]
wire _s2_sc_T_1 = ~s2_sc_REG; // @[dcache.scala:702:{51,59}]
wire _s2_sc_T_3 = _s2_sc_T_1 | _s2_sc_T_2; // @[dcache.scala:702:{51,72,83}]
wire s2_sc = _s2_sc_T & _s2_sc_T_3; // @[dcache.scala:702:{37,47,72}]
wire io_lsu_resp_0_bits_data_doZero_2 = s2_sc; // @[AMOALU.scala:43:31]
wire [27:0] _s2_lrsc_addr_match_T = s2_req_0_addr[33:6]; // @[dcache.scala:670:25, :703:86]
wire [27:0] _lrsc_addr_T = s2_req_0_addr[33:6]; // @[dcache.scala:670:25, :703:86, :710:35]
wire _s2_lrsc_addr_match_T_1 = lrsc_addr == _s2_lrsc_addr_match_T; // @[dcache.scala:700:23, :703:{66,86}]
wire _s2_lrsc_addr_match_T_2 = lrsc_valid & _s2_lrsc_addr_match_T_1; // @[dcache.scala:699:31, :703:{53,66}]
wire s2_lrsc_addr_match_0 = _s2_lrsc_addr_match_T_2; // @[dcache.scala:454:49, :703:53]
wire _s2_sc_fail_T = ~s2_lrsc_addr_match_0; // @[dcache.scala:454:49, :704:29]
wire s2_sc_fail = s2_sc & _s2_sc_fail_T; // @[dcache.scala:702:47, :704:{26,29}]
wire [7:0] _lrsc_count_T = {1'h0, lrsc_count} - 8'h1; // @[dcache.scala:698:27, :705:54]
wire [6:0] _lrsc_count_T_1 = _lrsc_count_T[6:0]; // @[dcache.scala:705:54]
wire _mshrs_io_req_0_valid_T_10 = s2_type == 3'h4; // @[package.scala:16:47]
wire [8:0] _debug_sc_fail_cnt_T = {1'h0, debug_sc_fail_cnt} + 9'h1; // @[dcache.scala:696:35, :730:48]
wire [7:0] _debug_sc_fail_cnt_T_1 = _debug_sc_fail_cnt_T[7:0]; // @[dcache.scala:730:48]
wire [63:0] s2_data_0_0; // @[dcache.scala:743:21]
wire [63:0] s2_data_0_1; // @[dcache.scala:743:21]
wire [63:0] _s2_data_muxed_T_2 = _s2_data_muxed_T ? s2_data_0_0 : 64'h0; // @[Mux.scala:30:73, :32:36]
wire [63:0] _s2_data_muxed_T_3 = _s2_data_muxed_T_1 ? s2_data_0_1 : 64'h0; // @[Mux.scala:30:73, :32:36]
wire [63:0] _s2_data_muxed_T_4 = _s2_data_muxed_T_2 | _s2_data_muxed_T_3; // @[Mux.scala:30:73]
wire [63:0] _s2_data_muxed_WIRE = _s2_data_muxed_T_4; // @[Mux.scala:30:73]
wire [63:0] s2_data_muxed_0 = _s2_data_muxed_WIRE; // @[Mux.scala:30:73]
wire [63:0] _s2_data_word_prebypass_T_1 = s2_data_muxed_0; // @[dcache.scala:454:49, :868:63]
wire replace; // @[Replacement.scala:37:29]
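  // Pseudo-random replacement: assemble a 16-bit LFSR value from the PRNG output bits and use
  // bit 0 to form the one-hot replaced-way enables for stages 1 and 2.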
wire [1:0] lfsr_lo_lo_lo = {_lfsr_prng_io_out_1, _lfsr_prng_io_out_0}; // @[PRNG.scala:91:22, :95:17]
wire [1:0] lfsr_lo_lo_hi = {_lfsr_prng_io_out_3, _lfsr_prng_io_out_2}; // @[PRNG.scala:91:22, :95:17]
wire [3:0] lfsr_lo_lo = {lfsr_lo_lo_hi, lfsr_lo_lo_lo}; // @[PRNG.scala:95:17]
wire [1:0] lfsr_lo_hi_lo = {_lfsr_prng_io_out_5, _lfsr_prng_io_out_4}; // @[PRNG.scala:91:22, :95:17]
wire [1:0] lfsr_lo_hi_hi = {_lfsr_prng_io_out_7, _lfsr_prng_io_out_6}; // @[PRNG.scala:91:22, :95:17]
wire [3:0] lfsr_lo_hi = {lfsr_lo_hi_hi, lfsr_lo_hi_lo}; // @[PRNG.scala:95:17]
wire [7:0] lfsr_lo = {lfsr_lo_hi, lfsr_lo_lo}; // @[PRNG.scala:95:17]
wire [1:0] lfsr_hi_lo_lo = {_lfsr_prng_io_out_9, _lfsr_prng_io_out_8}; // @[PRNG.scala:91:22, :95:17]
wire [1:0] lfsr_hi_lo_hi = {_lfsr_prng_io_out_11, _lfsr_prng_io_out_10}; // @[PRNG.scala:91:22, :95:17]
wire [3:0] lfsr_hi_lo = {lfsr_hi_lo_hi, lfsr_hi_lo_lo}; // @[PRNG.scala:95:17]
wire [1:0] lfsr_hi_hi_lo = {_lfsr_prng_io_out_13, _lfsr_prng_io_out_12}; // @[PRNG.scala:91:22, :95:17]
wire [1:0] lfsr_hi_hi_hi = {_lfsr_prng_io_out_15, _lfsr_prng_io_out_14}; // @[PRNG.scala:91:22, :95:17]
wire [3:0] lfsr_hi_hi = {lfsr_hi_hi_hi, lfsr_hi_hi_lo}; // @[PRNG.scala:95:17]
wire [7:0] lfsr_hi = {lfsr_hi_hi, lfsr_hi_lo}; // @[PRNG.scala:95:17]
wire [15:0] lfsr = {lfsr_hi, lfsr_lo}; // @[PRNG.scala:95:17]
wire _s1_replaced_way_en_T = lfsr[0]; // @[PRNG.scala:95:17]
wire _s2_replaced_way_en_T = lfsr[0]; // @[PRNG.scala:95:17]
wire [1:0] s1_replaced_way_en = 2'h1 << _s1_replaced_way_en_T; // @[OneHot.scala:58:35]
reg s2_replaced_way_en_REG; // @[dcache.scala:756:44]
wire [1:0] s2_replaced_way_en = 2'h1 << s2_replaced_way_en_REG; // @[OneHot.scala:58:35]
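  // s2_repl_meta: register each way's metadata into stage 2 and mux out the entry belonging to
  // the way selected for replacement.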
reg [1:0] s2_repl_meta_REG_coh_state; // @[dcache.scala:757:88]
wire [1:0] _s2_repl_meta_WIRE_0_coh_state = s2_repl_meta_REG_coh_state; // @[dcache.scala:656:47, :757:88]
reg [21:0] s2_repl_meta_REG_tag; // @[dcache.scala:757:88]
wire [21:0] _s2_repl_meta_WIRE_0_tag = s2_repl_meta_REG_tag; // @[dcache.scala:656:47, :757:88]
reg [1:0] s2_repl_meta_REG_1_coh_state; // @[dcache.scala:757:88]
wire [1:0] _s2_repl_meta_WIRE_1_coh_state_0 = s2_repl_meta_REG_1_coh_state; // @[dcache.scala:656:47, :757:88]
reg [21:0] s2_repl_meta_REG_1_tag; // @[dcache.scala:757:88]
wire [21:0] _s2_repl_meta_WIRE_1_tag_0 = s2_repl_meta_REG_1_tag; // @[dcache.scala:656:47, :757:88]
wire _s2_repl_meta_T = s2_replaced_way_en[0]; // @[OneHot.scala:58:35]
wire _s2_repl_meta_T_1 = s2_replaced_way_en[1]; // @[OneHot.scala:58:35]
wire [1:0] _s2_repl_meta_WIRE_3_state; // @[Mux.scala:30:73]
wire [21:0] _s2_repl_meta_WIRE_2; // @[Mux.scala:30:73]
wire [1:0] s2_repl_meta_0_coh_state = _s2_repl_meta_WIRE_1_coh_state; // @[Mux.scala:30:73]
wire [21:0] s2_repl_meta_0_tag = _s2_repl_meta_WIRE_1_tag; // @[Mux.scala:30:73]
wire [21:0] _s2_repl_meta_T_2 = _s2_repl_meta_T ? _s2_repl_meta_WIRE_0_tag : 22'h0; // @[Mux.scala:30:73, :32:36]
wire [21:0] _s2_repl_meta_T_3 = _s2_repl_meta_T_1 ? _s2_repl_meta_WIRE_1_tag_0 : 22'h0; // @[Mux.scala:30:73, :32:36]
wire [21:0] _s2_repl_meta_T_4 = _s2_repl_meta_T_2 | _s2_repl_meta_T_3; // @[Mux.scala:30:73]
assign _s2_repl_meta_WIRE_2 = _s2_repl_meta_T_4; // @[Mux.scala:30:73]
assign _s2_repl_meta_WIRE_1_tag = _s2_repl_meta_WIRE_2; // @[Mux.scala:30:73]
wire [1:0] _s2_repl_meta_WIRE_4; // @[Mux.scala:30:73]
assign _s2_repl_meta_WIRE_1_coh_state = _s2_repl_meta_WIRE_3_state; // @[Mux.scala:30:73]
wire [1:0] _s2_repl_meta_T_5 = _s2_repl_meta_T ? _s2_repl_meta_WIRE_0_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_repl_meta_T_6 = _s2_repl_meta_T_1 ? _s2_repl_meta_WIRE_1_coh_state_0 : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_repl_meta_T_7 = _s2_repl_meta_T_5 | _s2_repl_meta_T_6; // @[Mux.scala:30:73]
assign _s2_repl_meta_WIRE_4 = _s2_repl_meta_T_7; // @[Mux.scala:30:73]
assign _s2_repl_meta_WIRE_3_state = _s2_repl_meta_WIRE_4; // @[Mux.scala:30:73]
wire [21:0] mshrs_io_req_0_bits_old_meta_meta_tag = s2_repl_meta_0_tag; // @[HellaCache.scala:305:20]
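  // Stage-2 nack conditions: a registered nack (s2_nack_hit), a secondary-miss victim, a miss
  // with no MSHR available, and a writeback index conflict; any of them nacks the request unless
  // s2_type is 0.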
reg s2_nack_hit_0; // @[dcache.scala:760:31]
wire _GEN_16 = s2_valid_0 & s2_hit_0; // @[dcache.scala:454:49, :762:50]
wire _s2_nack_victim_T; // @[dcache.scala:762:50]
assign _s2_nack_victim_T = _GEN_16; // @[dcache.scala:762:50]
wire _s3_valid_T; // @[dcache.scala:897:38]
assign _s3_valid_T = _GEN_16; // @[dcache.scala:762:50, :897:38]
wire _s2_nack_victim_T_1 = _s2_nack_victim_T & _mshrs_io_secondary_miss_0; // @[dcache.scala:460:21, :762:{50,64}]
wire s2_nack_victim_0 = _s2_nack_victim_T_1; // @[dcache.scala:454:49, :762:64]
wire _s2_nack_miss_T = ~s2_hit_0; // @[dcache.scala:454:49, :689:36, :764:53]
wire _s2_nack_miss_T_1 = s2_valid_0 & _s2_nack_miss_T; // @[dcache.scala:454:49, :764:{50,53}]
wire _s2_nack_miss_T_2 = ~_mshrs_io_req_0_ready; // @[dcache.scala:460:21, :764:67]
wire _s2_nack_miss_T_3 = _s2_nack_miss_T_1 & _s2_nack_miss_T_2; // @[dcache.scala:764:{50,64,67}]
wire s2_nack_miss_0 = _s2_nack_miss_T_3; // @[dcache.scala:454:49, :764:64]
wire _s2_nack_wb_T = ~s2_hit_0; // @[dcache.scala:454:49, :689:36, :768:53]
wire _s2_nack_wb_T_1 = s2_valid_0 & _s2_nack_wb_T; // @[dcache.scala:454:49, :768:{50,53}]
wire _s2_nack_wb_T_2 = _s2_nack_wb_T_1 & s2_wb_idx_matches_0; // @[dcache.scala:692:34, :768:{50,64}]
wire s2_nack_wb_0 = _s2_nack_wb_T_2; // @[dcache.scala:454:49, :768:64]
assign s2_nack_0 = (s2_nack_miss_0 | s2_nack_hit_0 | s2_nack_victim_0 | s2_nack_wb_0) & (|s2_type); // @[dcache.scala:454:49, :671:25, :688:21, :760:31, :770:{55,73,113,131,142}]
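  // s2_send_resp: respond to the LSU when the registered response intent survives the nack
  // conditions (or s2_type is 0), the access hits, and the command is one that returns data.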
reg s2_send_resp_REG; // @[dcache.scala:773:12]
wire _s2_send_resp_T = s2_nack_hit_0 | s2_nack_victim_0; // @[dcache.scala:454:49, :760:31, :774:25]
wire _s2_send_resp_T_1 = _s2_send_resp_T; // @[dcache.scala:774:{25,46}]
wire _s2_send_resp_T_2 = ~_s2_send_resp_T_1; // @[dcache.scala:774:{8,46}]
wire _s2_send_resp_T_4 = _s2_send_resp_T_2 | _s2_send_resp_T_3; // @[dcache.scala:774:{8,66,77}]
wire _s2_send_resp_T_5 = s2_send_resp_REG & _s2_send_resp_T_4; // @[dcache.scala:773:{12,38}, :774:66]
wire _s2_send_resp_T_6 = _s2_send_resp_T_5 & s2_hit_0; // @[dcache.scala:454:49, :773:38, :774:91]
wire _GEN_17 = s2_req_0_uop_mem_cmd == 5'h0; // @[package.scala:16:47]
wire _s2_send_resp_T_7; // @[package.scala:16:47]
assign _s2_send_resp_T_7 = _GEN_17; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_20; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_20 = _GEN_17; // @[package.scala:16:47]
wire _GEN_18 = s2_req_0_uop_mem_cmd == 5'h10; // @[package.scala:16:47]
wire _s2_send_resp_T_8; // @[package.scala:16:47]
assign _s2_send_resp_T_8 = _GEN_18; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_21; // @[package.scala:16:47]
assign _mshrs_io_req_0_valid_T_21 = _GEN_18; // @[package.scala:16:47]
wire _s2_send_resp_T_11 = _s2_send_resp_T_7 | _s2_send_resp_T_8; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_12 = _s2_send_resp_T_11 | _s2_send_resp_T_9; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_13 = _s2_send_resp_T_12 | _s2_send_resp_T_10; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_18 = _s2_send_resp_T_14 | _s2_send_resp_T_15; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_19 = _s2_send_resp_T_18 | _s2_send_resp_T_16; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_20 = _s2_send_resp_T_19 | _s2_send_resp_T_17; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_26 = _s2_send_resp_T_21 | _s2_send_resp_T_22; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_27 = _s2_send_resp_T_26 | _s2_send_resp_T_23; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_28 = _s2_send_resp_T_27 | _s2_send_resp_T_24; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_29 = _s2_send_resp_T_28 | _s2_send_resp_T_25; // @[package.scala:16:47, :81:59]
wire _s2_send_resp_T_30 = _s2_send_resp_T_20 | _s2_send_resp_T_29; // @[package.scala:81:59]
wire _s2_send_resp_T_31 = _s2_send_resp_T_13 | _s2_send_resp_T_30; // @[package.scala:81:59]
wire _s2_send_resp_T_32 = _s2_send_resp_T_6 & _s2_send_resp_T_31; // @[Consts.scala:89:68]
wire s2_send_resp_0 = _s2_send_resp_T_32; // @[dcache.scala:454:49, :775:17]
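  // s2_send_store_ack: acknowledge a store when it is not nacked, the command writes data, and it
  // either hit or was accepted by an MSHR this cycle.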
reg s2_send_store_ack_REG; // @[dcache.scala:778:12]
wire _s2_send_store_ack_T = ~s2_nack_0; // @[dcache.scala:688:21, :706:60, :778:41]
wire _s2_send_store_ack_T_1 = s2_send_store_ack_REG & _s2_send_store_ack_T; // @[dcache.scala:778:{12,38,41}]
wire _s2_send_store_ack_T_4 = _s2_send_store_ack_T_2 | _s2_send_store_ack_T_3; // @[Consts.scala:90:{32,42,49}]
wire _s2_send_store_ack_T_6 = _s2_send_store_ack_T_4 | _s2_send_store_ack_T_5; // @[Consts.scala:90:{42,59,66}]
wire _s2_send_store_ack_T_11 = _s2_send_store_ack_T_7 | _s2_send_store_ack_T_8; // @[package.scala:16:47, :81:59]
wire _s2_send_store_ack_T_12 = _s2_send_store_ack_T_11 | _s2_send_store_ack_T_9; // @[package.scala:16:47, :81:59]
wire _s2_send_store_ack_T_13 = _s2_send_store_ack_T_12 | _s2_send_store_ack_T_10; // @[package.scala:16:47, :81:59]
wire _s2_send_store_ack_T_19 = _s2_send_store_ack_T_14 | _s2_send_store_ack_T_15; // @[package.scala:16:47, :81:59]
wire _s2_send_store_ack_T_20 = _s2_send_store_ack_T_19 | _s2_send_store_ack_T_16; // @[package.scala:16:47, :81:59]
wire _s2_send_store_ack_T_21 = _s2_send_store_ack_T_20 | _s2_send_store_ack_T_17; // @[package.scala:16:47, :81:59]
wire _s2_send_store_ack_T_22 = _s2_send_store_ack_T_21 | _s2_send_store_ack_T_18; // @[package.scala:16:47, :81:59]
wire _s2_send_store_ack_T_23 = _s2_send_store_ack_T_13 | _s2_send_store_ack_T_22; // @[package.scala:81:59]
wire _s2_send_store_ack_T_24 = _s2_send_store_ack_T_6 | _s2_send_store_ack_T_23; // @[Consts.scala:87:44, :90:{59,76}]
wire _s2_send_store_ack_T_25 = _s2_send_store_ack_T_1 & _s2_send_store_ack_T_24; // @[Consts.scala:90:76]
wire _mshrs_io_req_0_valid_T_70; // @[dcache.scala:798:77]
wire _T_81 = _mshrs_io_req_0_ready & _mshrs_io_req_0_valid_T_70; // @[Decoupled.scala:51:35]
assign replace = _T_81; // @[Decoupled.scala:51:35]
wire _s2_send_store_ack_T_26; // @[Decoupled.scala:51:35]
assign _s2_send_store_ack_T_26 = _T_81; // @[Decoupled.scala:51:35]
wire _s2_send_store_ack_T_27 = s2_hit_0 | _s2_send_store_ack_T_26; // @[Decoupled.scala:51:35]
wire _s2_send_store_ack_T_28 = _s2_send_store_ack_T_25 & _s2_send_store_ack_T_27; // @[dcache.scala:778:{53,87}, :779:18]
wire s2_send_store_ack_0 = _s2_send_store_ack_T_28; // @[dcache.scala:454:49, :778:87]
reg s2_send_nack_REG; // @[dcache.scala:780:44]
wire _s2_send_nack_T = s2_send_nack_REG & s2_nack_0; // @[dcache.scala:688:21, :780:{44,70}]
wire s2_send_nack_0 = _s2_send_nack_T; // @[dcache.scala:454:49, :780:70]
wire _s2_store_failed_T = s2_valid_0 & s2_nack_0; // @[dcache.scala:454:49, :688:21, :787:34]
wire _s2_store_failed_T_1 = _s2_store_failed_T & s2_send_nack_0; // @[dcache.scala:454:49, :787:{34,48}]
assign _s2_store_failed_T_2 = _s2_store_failed_T_1 & s2_req_0_uop_uses_stq; // @[dcache.scala:670:25, :787:{48,67}]
assign s2_store_failed = _s2_store_failed_T_2; // @[dcache.scala:636:29, :787:67]
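  // MSHR allocation request: issued on a stage-2 miss that survives every nack condition, for
  // request types 4 and 5 only, and only for commands that read or write memory.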
wire _mshrs_io_req_0_valid_T = ~s2_hit_0; // @[dcache.scala:454:49, :689:36, :792:29]
wire _mshrs_io_req_0_valid_T_1 = s2_valid_0 & _mshrs_io_req_0_valid_T; // @[dcache.scala:454:49, :791:51, :792:29]
wire _mshrs_io_req_0_valid_T_2 = ~s2_nack_hit_0; // @[dcache.scala:760:31, :793:29]
wire _mshrs_io_req_0_valid_T_3 = _mshrs_io_req_0_valid_T_1 & _mshrs_io_req_0_valid_T_2; // @[dcache.scala:791:51, :792:51, :793:29]
wire _mshrs_io_req_0_valid_T_4 = ~s2_nack_victim_0; // @[dcache.scala:454:49, :794:29]
wire _mshrs_io_req_0_valid_T_5 = _mshrs_io_req_0_valid_T_3 & _mshrs_io_req_0_valid_T_4; // @[dcache.scala:792:51, :793:51, :794:29]
wire _mshrs_io_req_0_valid_T_7 = _mshrs_io_req_0_valid_T_5; // @[dcache.scala:793:51, :794:51]
wire _mshrs_io_req_0_valid_T_8 = ~s2_nack_wb_0; // @[dcache.scala:454:49, :796:29]
wire _mshrs_io_req_0_valid_T_9 = _mshrs_io_req_0_valid_T_7 & _mshrs_io_req_0_valid_T_8; // @[dcache.scala:794:51, :795:51, :796:29]
wire _mshrs_io_req_0_valid_T_11 = s2_type == 3'h5; // @[package.scala:16:47]
wire _mshrs_io_req_0_valid_T_12 = _mshrs_io_req_0_valid_T_10 | _mshrs_io_req_0_valid_T_11; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_13 = _mshrs_io_req_0_valid_T_9 & _mshrs_io_req_0_valid_T_12; // @[package.scala:81:59]
wire _mshrs_io_req_0_valid_T_16 = _mshrs_io_req_0_valid_T_13; // @[dcache.scala:796:51, :797:77]
wire _mshrs_io_req_0_valid_T_17 = s2_req_0_uop_mem_cmd == 5'h2; // @[Consts.scala:88:35]
wire _mshrs_io_req_0_valid_T_19 = _mshrs_io_req_0_valid_T_17 | _mshrs_io_req_0_valid_T_18; // @[Consts.scala:88:{35,45,52}]
wire _mshrs_io_req_0_valid_T_24 = _mshrs_io_req_0_valid_T_20 | _mshrs_io_req_0_valid_T_21; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_25 = _mshrs_io_req_0_valid_T_24 | _mshrs_io_req_0_valid_T_22; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_26 = _mshrs_io_req_0_valid_T_25 | _mshrs_io_req_0_valid_T_23; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_31 = _mshrs_io_req_0_valid_T_27 | _mshrs_io_req_0_valid_T_28; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_32 = _mshrs_io_req_0_valid_T_31 | _mshrs_io_req_0_valid_T_29; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_33 = _mshrs_io_req_0_valid_T_32 | _mshrs_io_req_0_valid_T_30; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_39 = _mshrs_io_req_0_valid_T_34 | _mshrs_io_req_0_valid_T_35; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_40 = _mshrs_io_req_0_valid_T_39 | _mshrs_io_req_0_valid_T_36; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_41 = _mshrs_io_req_0_valid_T_40 | _mshrs_io_req_0_valid_T_37; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_42 = _mshrs_io_req_0_valid_T_41 | _mshrs_io_req_0_valid_T_38; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_43 = _mshrs_io_req_0_valid_T_33 | _mshrs_io_req_0_valid_T_42; // @[package.scala:81:59]
wire _mshrs_io_req_0_valid_T_44 = _mshrs_io_req_0_valid_T_26 | _mshrs_io_req_0_valid_T_43; // @[package.scala:81:59]
wire _mshrs_io_req_0_valid_T_45 = _mshrs_io_req_0_valid_T_19 | _mshrs_io_req_0_valid_T_44; // @[Consts.scala:88:45, :89:68]
wire _mshrs_io_req_0_valid_T_48 = _mshrs_io_req_0_valid_T_46 | _mshrs_io_req_0_valid_T_47; // @[Consts.scala:90:{32,42,49}]
wire _mshrs_io_req_0_valid_T_50 = _mshrs_io_req_0_valid_T_48 | _mshrs_io_req_0_valid_T_49; // @[Consts.scala:90:{42,59,66}]
wire _mshrs_io_req_0_valid_T_55 = _mshrs_io_req_0_valid_T_51 | _mshrs_io_req_0_valid_T_52; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_56 = _mshrs_io_req_0_valid_T_55 | _mshrs_io_req_0_valid_T_53; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_57 = _mshrs_io_req_0_valid_T_56 | _mshrs_io_req_0_valid_T_54; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_63 = _mshrs_io_req_0_valid_T_58 | _mshrs_io_req_0_valid_T_59; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_64 = _mshrs_io_req_0_valid_T_63 | _mshrs_io_req_0_valid_T_60; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_65 = _mshrs_io_req_0_valid_T_64 | _mshrs_io_req_0_valid_T_61; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_66 = _mshrs_io_req_0_valid_T_65 | _mshrs_io_req_0_valid_T_62; // @[package.scala:16:47, :81:59]
wire _mshrs_io_req_0_valid_T_67 = _mshrs_io_req_0_valid_T_57 | _mshrs_io_req_0_valid_T_66; // @[package.scala:81:59]
wire _mshrs_io_req_0_valid_T_68 = _mshrs_io_req_0_valid_T_50 | _mshrs_io_req_0_valid_T_67; // @[Consts.scala:87:44, :90:{59,76}]
wire _mshrs_io_req_0_valid_T_69 = _mshrs_io_req_0_valid_T_45 | _mshrs_io_req_0_valid_T_68; // @[Consts.scala:90:76]
assign _mshrs_io_req_0_valid_T_70 = _mshrs_io_req_0_valid_T_16 & _mshrs_io_req_0_valid_T_69; // @[dcache.scala:797:77, :798:77, :800:65]
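  // The MSHR request's old metadata and way enable come from the matching way on a tag match,
  // and from the replacement way otherwise.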
wire [1:0] _mshrs_io_req_0_bits_old_meta_T_coh_state = s2_tag_match_0 ? mshrs_io_req_0_bits_old_meta_meta_coh_state : s2_repl_meta_0_coh_state; // @[HellaCache.scala:305:20]
wire [21:0] _mshrs_io_req_0_bits_old_meta_T_tag = s2_tag_match_0 ? mshrs_io_req_0_bits_old_meta_meta_tag : s2_repl_meta_0_tag; // @[HellaCache.scala:305:20]
wire [1:0] _mshrs_io_req_0_bits_way_en_T = s2_tag_match_0 ? s2_tag_match_way_0 : s2_replaced_way_en; // @[OneHot.scala:58:35]
wire _mshrs_io_req_is_probe_0_T = s2_type == 3'h1; // @[dcache.scala:671:25, :812:49]
wire _mshrs_io_req_is_probe_0_T_1 = _mshrs_io_req_is_probe_0_T & s2_valid_0; // @[dcache.scala:454:49, :812:{49,61}]
wire _mshrs_io_meta_resp_valid_T = ~s2_nack_hit_0; // @[dcache.scala:760:31, :793:29, :815:36]
wire _mshrs_io_meta_resp_valid_T_1 = _mshrs_io_meta_resp_valid_T | _prober_io_mshr_wb_rdy; // @[dcache.scala:459:22, :815:{36,52}]
reg [1:0] mshrs_io_meta_resp_bits_REG_0_coh_state; // @[dcache.scala:816:70]
reg [21:0] mshrs_io_meta_resp_bits_REG_0_tag; // @[dcache.scala:816:70]
reg [1:0] mshrs_io_meta_resp_bits_REG_1_coh_state; // @[dcache.scala:816:70]
reg [21:0] mshrs_io_meta_resp_bits_REG_1_tag; // @[dcache.scala:816:70]
wire [1:0] _mshrs_io_meta_resp_bits_WIRE_2_state; // @[Mux.scala:30:73]
wire [21:0] _mshrs_io_meta_resp_bits_WIRE_1; // @[Mux.scala:30:73]
wire [21:0] _mshrs_io_meta_resp_bits_T_2 = _mshrs_io_meta_resp_bits_T ? mshrs_io_meta_resp_bits_REG_0_tag : 22'h0; // @[Mux.scala:30:73, :32:36]
wire [21:0] _mshrs_io_meta_resp_bits_T_3 = _mshrs_io_meta_resp_bits_T_1 ? mshrs_io_meta_resp_bits_REG_1_tag : 22'h0; // @[Mux.scala:30:73, :32:36]
wire [21:0] _mshrs_io_meta_resp_bits_T_4 = _mshrs_io_meta_resp_bits_T_2 | _mshrs_io_meta_resp_bits_T_3; // @[Mux.scala:30:73]
assign _mshrs_io_meta_resp_bits_WIRE_1 = _mshrs_io_meta_resp_bits_T_4; // @[Mux.scala:30:73]
wire [21:0] _mshrs_io_meta_resp_bits_WIRE_tag = _mshrs_io_meta_resp_bits_WIRE_1; // @[Mux.scala:30:73]
wire [1:0] _mshrs_io_meta_resp_bits_WIRE_3; // @[Mux.scala:30:73]
wire [1:0] _mshrs_io_meta_resp_bits_WIRE_coh_state = _mshrs_io_meta_resp_bits_WIRE_2_state; // @[Mux.scala:30:73]
wire [1:0] _mshrs_io_meta_resp_bits_T_5 = _mshrs_io_meta_resp_bits_T ? mshrs_io_meta_resp_bits_REG_0_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _mshrs_io_meta_resp_bits_T_6 = _mshrs_io_meta_resp_bits_T_1 ? mshrs_io_meta_resp_bits_REG_1_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _mshrs_io_meta_resp_bits_T_7 = _mshrs_io_meta_resp_bits_T_5 | _mshrs_io_meta_resp_bits_T_6; // @[Mux.scala:30:73]
assign _mshrs_io_meta_resp_bits_WIRE_3 = _mshrs_io_meta_resp_bits_T_7; // @[Mux.scala:30:73]
assign _mshrs_io_meta_resp_bits_WIRE_2_state = _mshrs_io_meta_resp_bits_WIRE_3; // @[Mux.scala:30:73]
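  // Prober handshake: incoming B-channel probes are held off while an LR/SC reservation is live,
  // and the prober only proceeds past writeback when the writeback unit is idle or working on a
  // different index.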
wire _prober_io_req_valid_T = ~lrsc_valid; // @[dcache.scala:699:31, :821:46]
wire _prober_io_req_valid_T_1 = nodeOut_b_valid & _prober_io_req_valid_T; // @[MixedNode.scala:542:17]
wire _nodeOut_b_ready_T = ~lrsc_valid; // @[dcache.scala:699:31, :821:46, :822:51]
assign _nodeOut_b_ready_T_1 = _prober_io_req_ready & _nodeOut_b_ready_T; // @[dcache.scala:459:22, :822:{48,51}]
assign nodeOut_b_ready = _nodeOut_b_ready_T_1; // @[MixedNode.scala:542:17]
wire _prober_io_wb_rdy_T = _prober_io_meta_write_bits_idx != _wb_io_idx_bits; // @[dcache.scala:458:18, :459:22, :828:59]
wire _prober_io_wb_rdy_T_1 = ~_wb_io_idx_valid; // @[dcache.scala:458:18, :828:82]
wire _prober_io_wb_rdy_T_2 = _prober_io_wb_rdy_T | _prober_io_wb_rdy_T_1; // @[dcache.scala:828:{59,79,82}]
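  // D-channel grant routing: grants with source 4'h8 are steered to the writeback unit; the
  // channel is also ready whenever the MSHR file can accept a grant.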
wire _wb_io_mem_grant_T_1 = nodeOut_d_bits_source == 4'h8; // @[MixedNode.scala:542:17]
assign nodeOut_d_ready = _wb_io_mem_grant_T_1 | _mshrs_io_mem_grant_ready; // @[MixedNode.scala:542:17]
wire _wb_io_mem_grant_T = nodeOut_d_ready & nodeOut_d_valid; // @[Decoupled.scala:51:35]
wire _wb_io_mem_grant_T_2 = _wb_io_mem_grant_T & _wb_io_mem_grant_T_1; // @[Decoupled.scala:51:35]
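  // TileLink C-channel arbiter: compute each source's beat count (writeback Release data vs.
  // prober response), lock onto one source until its beats drain (beatsLeft/state), and mux the
  // selected source's fields onto nodeOut_c.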
wire opdata = _wb_io_release_bits_opcode[0]; // @[Edges.scala:102:36]
wire [26:0] _decode_T_3 = 27'hFFF << _prober_io_rep_bits_size; // @[package.scala:243:71]
wire [11:0] _decode_T_4 = _decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _decode_T_5 = ~_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] decode_1 = _decode_T_5[11:3]; // @[package.scala:243:46]
reg [8:0] beatsLeft; // @[Arbiter.scala:60:30]
wire idle = beatsLeft == 9'h0; // @[Arbiter.scala:60:30, :61:28]
wire latch = idle & nodeOut_c_ready; // @[Arbiter.scala:61:28, :62:24]
wire [1:0] _readys_T = {_prober_io_rep_valid, _wb_io_release_valid}; // @[Arbiter.scala:68:51]
wire [2:0] _readys_T_1 = {_readys_T, 1'h0}; // @[package.scala:253:48]
wire [1:0] _readys_T_2 = _readys_T_1[1:0]; // @[package.scala:253:{48,53}]
wire [1:0] _readys_T_3 = _readys_T | _readys_T_2; // @[package.scala:253:{43,53}]
wire [1:0] _readys_T_4 = _readys_T_3; // @[package.scala:253:43, :254:17]
wire [2:0] _readys_T_5 = {_readys_T_4, 1'h0}; // @[package.scala:254:17]
wire [1:0] _readys_T_6 = _readys_T_5[1:0]; // @[Arbiter.scala:16:{78,83}]
wire [1:0] _readys_T_7 = ~_readys_T_6; // @[Arbiter.scala:16:{61,83}]
wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:16:61, :68:76]
wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}]
wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:16:61, :68:76]
wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}]
wire _winner_T = readys_0 & _wb_io_release_valid; // @[Arbiter.scala:68:27, :71:69]
wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}]
wire _winner_T_1 = readys_1 & _prober_io_rep_valid; // @[Arbiter.scala:68:27, :71:69]
wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}]
wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48]
wire _prefixOR_T = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48]
wire _nodeOut_c_valid_T = _wb_io_release_valid | _prober_io_rep_valid; // @[Arbiter.scala:79:31, :96:46]
wire [8:0] maskedBeats_0 = winner_0 & opdata ? 9'h7 : 9'h0; // @[Edges.scala:102:36, :221:14]
wire [8:0] initBeats = maskedBeats_0; // @[Arbiter.scala:82:69, :84:44]
wire _GEN_19 = nodeOut_c_ready & nodeOut_c_valid; // @[Decoupled.scala:51:35]
wire _beatsLeft_T; // @[Decoupled.scala:51:35]
assign _beatsLeft_T = _GEN_19; // @[Decoupled.scala:51:35]
wire _io_lsu_perf_release_T; // @[Decoupled.scala:51:35]
assign _io_lsu_perf_release_T = _GEN_19; // @[Decoupled.scala:51:35]
wire [9:0] _beatsLeft_T_1 = {1'h0, beatsLeft} - {9'h0, _beatsLeft_T}; // @[Decoupled.scala:51:35]
wire [8:0] _beatsLeft_T_2 = _beatsLeft_T_1[8:0]; // @[Arbiter.scala:85:52]
wire [8:0] _beatsLeft_T_3 = latch ? initBeats : _beatsLeft_T_2; // @[Arbiter.scala:62:24, :84:44, :85:{23,52}]
reg state_0; // @[Arbiter.scala:88:26]
reg state_1; // @[Arbiter.scala:88:26]
wire muxState_0 = idle ? winner_0 : state_0; // @[Arbiter.scala:61:28, :71:27, :88:26, :89:25]
wire muxState_1 = idle ? winner_1 : state_1; // @[Arbiter.scala:61:28, :71:27, :88:26, :89:25]
wire allowed_0 = idle ? readys_0 : state_0; // @[Arbiter.scala:61:28, :68:27, :88:26, :92:24]
wire allowed_1 = idle ? readys_1 : state_1; // @[Arbiter.scala:61:28, :68:27, :88:26, :92:24]
wire _wb_io_release_ready_T = nodeOut_c_ready & allowed_0; // @[Arbiter.scala:92:24, :94:31]
wire _prober_io_rep_ready_T = nodeOut_c_ready & allowed_1; // @[Arbiter.scala:92:24, :94:31]
wire _nodeOut_c_valid_T_1 = state_0 & _wb_io_release_valid; // @[Mux.scala:30:73]
wire _nodeOut_c_valid_T_2 = state_1 & _prober_io_rep_valid; // @[Mux.scala:30:73]
wire _nodeOut_c_valid_T_3 = _nodeOut_c_valid_T_1 | _nodeOut_c_valid_T_2; // @[Mux.scala:30:73]
wire _nodeOut_c_valid_WIRE = _nodeOut_c_valid_T_3; // @[Mux.scala:30:73]
assign _nodeOut_c_valid_T_4 = idle ? _nodeOut_c_valid_T : _nodeOut_c_valid_WIRE; // @[Mux.scala:30:73]
assign nodeOut_c_valid = _nodeOut_c_valid_T_4; // @[Arbiter.scala:96:24]
wire [2:0] _nodeOut_c_bits_WIRE_9; // @[Mux.scala:30:73]
assign nodeOut_c_bits_opcode = _nodeOut_c_bits_WIRE_opcode; // @[Mux.scala:30:73]
wire [2:0] _nodeOut_c_bits_WIRE_8; // @[Mux.scala:30:73]
assign nodeOut_c_bits_param = _nodeOut_c_bits_WIRE_param; // @[Mux.scala:30:73]
wire [3:0] _nodeOut_c_bits_WIRE_7; // @[Mux.scala:30:73]
assign nodeOut_c_bits_size = _nodeOut_c_bits_WIRE_size; // @[Mux.scala:30:73]
wire [3:0] _nodeOut_c_bits_WIRE_6; // @[Mux.scala:30:73]
assign nodeOut_c_bits_source = _nodeOut_c_bits_WIRE_source; // @[Mux.scala:30:73]
wire [31:0] _nodeOut_c_bits_WIRE_5; // @[Mux.scala:30:73]
assign nodeOut_c_bits_address = _nodeOut_c_bits_WIRE_address; // @[Mux.scala:30:73]
wire [63:0] _nodeOut_c_bits_WIRE_2; // @[Mux.scala:30:73]
assign nodeOut_c_bits_data = _nodeOut_c_bits_WIRE_data; // @[Mux.scala:30:73]
wire [63:0] _nodeOut_c_bits_T_3 = muxState_0 ? _wb_io_release_bits_data : 64'h0; // @[Mux.scala:30:73]
wire [63:0] _nodeOut_c_bits_T_5 = _nodeOut_c_bits_T_3; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_2 = _nodeOut_c_bits_T_5; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_data = _nodeOut_c_bits_WIRE_2; // @[Mux.scala:30:73]
wire [31:0] _nodeOut_c_bits_T_6 = muxState_0 ? _wb_io_release_bits_address : 32'h0; // @[Mux.scala:30:73]
wire [31:0] _nodeOut_c_bits_T_7 = muxState_1 ? _prober_io_rep_bits_address : 32'h0; // @[Mux.scala:30:73]
wire [31:0] _nodeOut_c_bits_T_8 = _nodeOut_c_bits_T_6 | _nodeOut_c_bits_T_7; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_5 = _nodeOut_c_bits_T_8; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_address = _nodeOut_c_bits_WIRE_5; // @[Mux.scala:30:73]
wire [3:0] _nodeOut_c_bits_T_9 = muxState_0 ? _wb_io_release_bits_source : 4'h0; // @[Mux.scala:30:73]
wire [3:0] _nodeOut_c_bits_T_10 = muxState_1 ? _prober_io_rep_bits_source : 4'h0; // @[Mux.scala:30:73]
wire [3:0] _nodeOut_c_bits_T_11 = _nodeOut_c_bits_T_9 | _nodeOut_c_bits_T_10; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_6 = _nodeOut_c_bits_T_11; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_source = _nodeOut_c_bits_WIRE_6; // @[Mux.scala:30:73]
wire [3:0] _nodeOut_c_bits_T_12 = muxState_0 ? 4'h6 : 4'h0; // @[Mux.scala:30:73]
wire [3:0] _nodeOut_c_bits_T_13 = muxState_1 ? _prober_io_rep_bits_size : 4'h0; // @[Mux.scala:30:73]
wire [3:0] _nodeOut_c_bits_T_14 = _nodeOut_c_bits_T_12 | _nodeOut_c_bits_T_13; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_7 = _nodeOut_c_bits_T_14; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_size = _nodeOut_c_bits_WIRE_7; // @[Mux.scala:30:73]
wire [2:0] _nodeOut_c_bits_T_15 = muxState_0 ? _wb_io_release_bits_param : 3'h0; // @[Mux.scala:30:73]
wire [2:0] _nodeOut_c_bits_T_16 = muxState_1 ? _prober_io_rep_bits_param : 3'h0; // @[Mux.scala:30:73]
wire [2:0] _nodeOut_c_bits_T_17 = _nodeOut_c_bits_T_15 | _nodeOut_c_bits_T_16; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_8 = _nodeOut_c_bits_T_17; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_param = _nodeOut_c_bits_WIRE_8; // @[Mux.scala:30:73]
wire [2:0] _nodeOut_c_bits_T_18 = muxState_0 ? _wb_io_release_bits_opcode : 3'h0; // @[Mux.scala:30:73]
wire [2:0] _nodeOut_c_bits_T_19 = {muxState_1, 2'h0}; // @[Mux.scala:30:73]
wire [2:0] _nodeOut_c_bits_T_20 = _nodeOut_c_bits_T_18 | _nodeOut_c_bits_T_19; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_9 = _nodeOut_c_bits_T_20; // @[Mux.scala:30:73]
assign _nodeOut_c_bits_WIRE_opcode = _nodeOut_c_bits_WIRE_9; // @[Mux.scala:30:73]
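  // Performance event: pulse io_lsu_perf_release on the final beat of each outgoing C-channel
  // message, using the standard first/last beat counter.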
wire [26:0] _io_lsu_perf_release_beats1_decode_T = 27'hFFF << nodeOut_c_bits_size; // @[package.scala:243:71]
wire [11:0] _io_lsu_perf_release_beats1_decode_T_1 = _io_lsu_perf_release_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _io_lsu_perf_release_beats1_decode_T_2 = ~_io_lsu_perf_release_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] io_lsu_perf_release_beats1_decode = _io_lsu_perf_release_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire io_lsu_perf_release_beats1_opdata = nodeOut_c_bits_opcode[0]; // @[Edges.scala:102:36]
wire [8:0] io_lsu_perf_release_beats1 = io_lsu_perf_release_beats1_opdata ? io_lsu_perf_release_beats1_decode : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] io_lsu_perf_release_counter; // @[Edges.scala:229:27]
wire [9:0] _io_lsu_perf_release_counter1_T = {1'h0, io_lsu_perf_release_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] io_lsu_perf_release_counter1 = _io_lsu_perf_release_counter1_T[8:0]; // @[Edges.scala:230:28]
wire io_lsu_perf_release_first = io_lsu_perf_release_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _io_lsu_perf_release_last_T = io_lsu_perf_release_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _io_lsu_perf_release_last_T_1 = io_lsu_perf_release_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire io_lsu_perf_release_last = _io_lsu_perf_release_last_T | _io_lsu_perf_release_last_T_1; // @[Edges.scala:232:{25,33,43}]
assign io_lsu_perf_release_done = io_lsu_perf_release_last & _io_lsu_perf_release_T; // @[Decoupled.scala:51:35]
assign io_lsu_perf_release_0 = io_lsu_perf_release_done; // @[Edges.scala:233:22]
wire [8:0] _io_lsu_perf_release_count_T = ~io_lsu_perf_release_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] io_lsu_perf_release_count = io_lsu_perf_release_beats1 & _io_lsu_perf_release_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _io_lsu_perf_release_counter_T = io_lsu_perf_release_first ? io_lsu_perf_release_beats1 : io_lsu_perf_release_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
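  // Performance event: pulse io_lsu_perf_acquire on the final beat of each outgoing A-channel
  // message, using the same beat-counter structure.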
wire _io_lsu_perf_acquire_T = nodeOut_a_ready & nodeOut_a_valid; // @[Decoupled.scala:51:35]
wire [26:0] _io_lsu_perf_acquire_beats1_decode_T = 27'hFFF << nodeOut_a_bits_size; // @[package.scala:243:71]
wire [11:0] _io_lsu_perf_acquire_beats1_decode_T_1 = _io_lsu_perf_acquire_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _io_lsu_perf_acquire_beats1_decode_T_2 = ~_io_lsu_perf_acquire_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] io_lsu_perf_acquire_beats1_decode = _io_lsu_perf_acquire_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _io_lsu_perf_acquire_beats1_opdata_T = nodeOut_a_bits_opcode[2]; // @[Edges.scala:92:37]
wire io_lsu_perf_acquire_beats1_opdata = ~_io_lsu_perf_acquire_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] io_lsu_perf_acquire_beats1 = io_lsu_perf_acquire_beats1_opdata ? io_lsu_perf_acquire_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] io_lsu_perf_acquire_counter; // @[Edges.scala:229:27]
wire [9:0] _io_lsu_perf_acquire_counter1_T = {1'h0, io_lsu_perf_acquire_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] io_lsu_perf_acquire_counter1 = _io_lsu_perf_acquire_counter1_T[8:0]; // @[Edges.scala:230:28]
wire io_lsu_perf_acquire_first = io_lsu_perf_acquire_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _io_lsu_perf_acquire_last_T = io_lsu_perf_acquire_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _io_lsu_perf_acquire_last_T_1 = io_lsu_perf_acquire_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire io_lsu_perf_acquire_last = _io_lsu_perf_acquire_last_T | _io_lsu_perf_acquire_last_T_1; // @[Edges.scala:232:{25,33,43}]
assign io_lsu_perf_acquire_done = io_lsu_perf_acquire_last & _io_lsu_perf_acquire_T; // @[Decoupled.scala:51:35]
assign io_lsu_perf_acquire_0 = io_lsu_perf_acquire_done; // @[Edges.scala:233:22]
wire [8:0] _io_lsu_perf_acquire_count_T = ~io_lsu_perf_acquire_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] io_lsu_perf_acquire_count = io_lsu_perf_acquire_beats1 & _io_lsu_perf_acquire_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _io_lsu_perf_acquire_counter_T = io_lsu_perf_acquire_first ? io_lsu_perf_acquire_beats1 : io_lsu_perf_acquire_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
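  // Load response data path: drive io_lsu_resp when stage 2 sends a response; the data is the
  // addressed sub-word selected by addr[2:0], sign- or zero-extended per size and mem_signed,
  // with SC results reduced to 0 (success) or 1 (failure) in bit 0.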
wire [63:0] s2_data_word_prebypass_0 = _s2_data_word_prebypass_T_1; // @[dcache.scala:454:49, :868:63]
wire [63:0] _s2_data_word_0_T_2; // @[dcache.scala:919:27]
wire [63:0] s2_data_word_0; // @[dcache.scala:869:26]
wire [63:0] size_dat_padded = s2_data_word_0; // @[AMOALU.scala:13:27]
assign _io_lsu_resp_0_valid_T = s2_valid_0 & s2_send_resp_0; // @[dcache.scala:454:49, :877:41]
assign io_lsu_resp_0_valid_0 = _io_lsu_resp_0_valid_T; // @[dcache.scala:438:7, :877:41]
wire _io_lsu_resp_0_bits_data_shifted_T = s2_req_0_addr[2]; // @[AMOALU.scala:42:29]
wire [31:0] _io_lsu_resp_0_bits_data_shifted_T_1 = s2_data_word_0[63:32]; // @[AMOALU.scala:42:37]
wire [31:0] _io_lsu_resp_0_bits_data_T_5 = s2_data_word_0[63:32]; // @[AMOALU.scala:42:37, :45:94]
wire [31:0] _io_lsu_resp_0_bits_data_shifted_T_2 = s2_data_word_0[31:0]; // @[AMOALU.scala:42:55]
wire [31:0] io_lsu_resp_0_bits_data_shifted = _io_lsu_resp_0_bits_data_shifted_T ? _io_lsu_resp_0_bits_data_shifted_T_1 : _io_lsu_resp_0_bits_data_shifted_T_2; // @[AMOALU.scala:42:{24,29,37,55}]
wire [31:0] io_lsu_resp_0_bits_data_zeroed = io_lsu_resp_0_bits_data_shifted; // @[AMOALU.scala:42:24, :44:23]
wire _io_lsu_resp_0_bits_data_T = size == 2'h2; // @[AMOALU.scala:11:18, :45:26]
wire _io_lsu_resp_0_bits_data_T_1 = _io_lsu_resp_0_bits_data_T; // @[AMOALU.scala:45:{26,34}]
wire _io_lsu_resp_0_bits_data_T_2 = io_lsu_resp_0_bits_data_zeroed[31]; // @[AMOALU.scala:44:23, :45:81]
wire _io_lsu_resp_0_bits_data_T_3 = s2_req_0_uop_mem_signed & _io_lsu_resp_0_bits_data_T_2; // @[AMOALU.scala:45:{72,81}]
wire [31:0] _io_lsu_resp_0_bits_data_T_4 = {32{_io_lsu_resp_0_bits_data_T_3}}; // @[AMOALU.scala:45:{49,72}]
wire [31:0] _io_lsu_resp_0_bits_data_T_6 = _io_lsu_resp_0_bits_data_T_1 ? _io_lsu_resp_0_bits_data_T_4 : _io_lsu_resp_0_bits_data_T_5; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_lsu_resp_0_bits_data_T_7 = {_io_lsu_resp_0_bits_data_T_6, io_lsu_resp_0_bits_data_zeroed}; // @[AMOALU.scala:44:23, :45:{16,20}]
wire _io_lsu_resp_0_bits_data_shifted_T_3 = s2_req_0_addr[1]; // @[AMOALU.scala:42:29]
wire [15:0] _io_lsu_resp_0_bits_data_shifted_T_4 = _io_lsu_resp_0_bits_data_T_7[31:16]; // @[AMOALU.scala:42:37, :45:16]
wire [15:0] _io_lsu_resp_0_bits_data_shifted_T_5 = _io_lsu_resp_0_bits_data_T_7[15:0]; // @[AMOALU.scala:42:55, :45:16]
wire [15:0] io_lsu_resp_0_bits_data_shifted_1 = _io_lsu_resp_0_bits_data_shifted_T_3 ? _io_lsu_resp_0_bits_data_shifted_T_4 : _io_lsu_resp_0_bits_data_shifted_T_5; // @[AMOALU.scala:42:{24,29,37,55}]
wire [15:0] io_lsu_resp_0_bits_data_zeroed_1 = io_lsu_resp_0_bits_data_shifted_1; // @[AMOALU.scala:42:24, :44:23]
wire _io_lsu_resp_0_bits_data_T_8 = size == 2'h1; // @[AMOALU.scala:11:18, :45:26]
wire _io_lsu_resp_0_bits_data_T_9 = _io_lsu_resp_0_bits_data_T_8; // @[AMOALU.scala:45:{26,34}]
wire _io_lsu_resp_0_bits_data_T_10 = io_lsu_resp_0_bits_data_zeroed_1[15]; // @[AMOALU.scala:44:23, :45:81]
wire _io_lsu_resp_0_bits_data_T_11 = s2_req_0_uop_mem_signed & _io_lsu_resp_0_bits_data_T_10; // @[AMOALU.scala:45:{72,81}]
wire [47:0] _io_lsu_resp_0_bits_data_T_12 = {48{_io_lsu_resp_0_bits_data_T_11}}; // @[AMOALU.scala:45:{49,72}]
wire [47:0] _io_lsu_resp_0_bits_data_T_13 = _io_lsu_resp_0_bits_data_T_7[63:16]; // @[AMOALU.scala:45:{16,94}]
wire [47:0] _io_lsu_resp_0_bits_data_T_14 = _io_lsu_resp_0_bits_data_T_9 ? _io_lsu_resp_0_bits_data_T_12 : _io_lsu_resp_0_bits_data_T_13; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_lsu_resp_0_bits_data_T_15 = {_io_lsu_resp_0_bits_data_T_14, io_lsu_resp_0_bits_data_zeroed_1}; // @[AMOALU.scala:44:23, :45:{16,20}]
wire _io_lsu_resp_0_bits_data_shifted_T_6 = s2_req_0_addr[0]; // @[AMOALU.scala:42:29]
wire [7:0] _io_lsu_resp_0_bits_data_shifted_T_7 = _io_lsu_resp_0_bits_data_T_15[15:8]; // @[AMOALU.scala:42:37, :45:16]
wire [7:0] _io_lsu_resp_0_bits_data_shifted_T_8 = _io_lsu_resp_0_bits_data_T_15[7:0]; // @[AMOALU.scala:42:55, :45:16]
wire [7:0] io_lsu_resp_0_bits_data_shifted_2 = _io_lsu_resp_0_bits_data_shifted_T_6 ? _io_lsu_resp_0_bits_data_shifted_T_7 : _io_lsu_resp_0_bits_data_shifted_T_8; // @[AMOALU.scala:42:{24,29,37,55}]
wire [7:0] io_lsu_resp_0_bits_data_zeroed_2 = io_lsu_resp_0_bits_data_doZero_2 ? 8'h0 : io_lsu_resp_0_bits_data_shifted_2; // @[AMOALU.scala:42:24, :43:31, :44:23]
wire _io_lsu_resp_0_bits_data_T_16 = size == 2'h0; // @[AMOALU.scala:11:18, :45:26]
wire _io_lsu_resp_0_bits_data_T_17 = _io_lsu_resp_0_bits_data_T_16 | io_lsu_resp_0_bits_data_doZero_2; // @[AMOALU.scala:43:31, :45:{26,34}]
wire _io_lsu_resp_0_bits_data_T_18 = io_lsu_resp_0_bits_data_zeroed_2[7]; // @[AMOALU.scala:44:23, :45:81]
wire _io_lsu_resp_0_bits_data_T_19 = s2_req_0_uop_mem_signed & _io_lsu_resp_0_bits_data_T_18; // @[AMOALU.scala:45:{72,81}]
wire [55:0] _io_lsu_resp_0_bits_data_T_20 = {56{_io_lsu_resp_0_bits_data_T_19}}; // @[AMOALU.scala:45:{49,72}]
wire [55:0] _io_lsu_resp_0_bits_data_T_21 = _io_lsu_resp_0_bits_data_T_15[63:8]; // @[AMOALU.scala:45:{16,94}]
wire [55:0] _io_lsu_resp_0_bits_data_T_22 = _io_lsu_resp_0_bits_data_T_17 ? _io_lsu_resp_0_bits_data_T_20 : _io_lsu_resp_0_bits_data_T_21; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_lsu_resp_0_bits_data_T_23 = {_io_lsu_resp_0_bits_data_T_22, io_lsu_resp_0_bits_data_zeroed_2}; // @[AMOALU.scala:44:23, :45:{16,20}]
assign _io_lsu_resp_0_bits_data_T_24 = {_io_lsu_resp_0_bits_data_T_23[63:1], _io_lsu_resp_0_bits_data_T_23[0] | s2_sc_fail}; // @[AMOALU.scala:45:16]
assign io_lsu_resp_0_bits_data_0 = _io_lsu_resp_0_bits_data_T_24; // @[dcache.scala:438:7, :879:49]
assign _io_lsu_nack_0_valid_T = s2_valid_0 & s2_send_nack_0; // @[dcache.scala:454:49, :884:41]
assign io_lsu_nack_0_valid_0 = _io_lsu_nack_0_valid_T; // @[dcache.scala:438:7, :884:41] |
Generate the Verilog code corresponding to the following Chisel files.
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
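// Illustrative usage sketch, not part of the original file: `myClockSource` and the Router
// instantiation are assumptions. NoC.scala below uses the same pattern, giving each router its
// own ClockSinkDomain so it elaborates under that domain's clock and reset.
//
//   val dom = LazyModule(new ClockSinkDomain(ClockSinkParameters(name = Some("noc_router_0"))))
//   dom.clockNode := myClockSource                    // drive the domain's clock diplomatically
//   val router = dom { LazyModule(new Router(...)) }  // children pick up dom's childClock/childReset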
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
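// Illustrative sketch, not part of the original file: `io_clock`/`io_reset` are assumed ports.
// A LazyRawModuleImp subclass typically forwards an externally supplied clock and reset to its
// lazy children, which is what ClockDomain's Impl above does through its ClockBundle:
//
//   class MyDomainImp(outer: LazyModule) extends LazyRawModuleImp(outer) {
//     override def provideImplicitClockToLazyChildren = true
//     val io_clock = IO(Input(Clock()))
//     val io_reset = IO(Input(AsyncReset()))
//     childClock := io_clock
//     childReset := io_reset
//   }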
File NoC.scala:
package constellation.noc
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, BundleBridgeSink, InModuleBody}
import freechips.rocketchip.util.ElaborationArtefacts
import freechips.rocketchip.prci._
import constellation.router._
import constellation.channel._
import constellation.routing.{RoutingRelation, ChannelRoutingInfo}
import constellation.topology.{PhysicalTopology, UnidirectionalLine}
class NoCTerminalIO(
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle {
val ingress = MixedVec(ingressParams.map { u => Flipped(new IngressChannel(u)) })
val egress = MixedVec(egressParams.map { u => new EgressChannel(u) })
}
class NoC(nocParams: NoCParams)(implicit p: Parameters) extends LazyModule {
override def shouldBeInlined = nocParams.inlineNoC
val internalParams = InternalNoCParams(nocParams)
val allChannelParams = internalParams.channelParams
val allIngressParams = internalParams.ingressParams
val allEgressParams = internalParams.egressParams
val allRouterParams = internalParams.routerParams
val iP = p.alterPartial({ case InternalNoCKey => internalParams })
val nNodes = nocParams.topology.nNodes
val nocName = nocParams.nocName
val skipValidationChecks = nocParams.skipValidationChecks
val clockSourceNodes = Seq.tabulate(nNodes) { i => ClockSourceNode(Seq(ClockSourceParameters())) }
val router_sink_domains = Seq.tabulate(nNodes) { i =>
val router_sink_domain = LazyModule(new ClockSinkDomain(ClockSinkParameters(
name = Some(s"${nocName}_router_$i")
)))
router_sink_domain.clockNode := clockSourceNodes(i)
router_sink_domain
}
val routers = Seq.tabulate(nNodes) { i => router_sink_domains(i) {
val inParams = allChannelParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val outParams = allChannelParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val ingressParams = allIngressParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val egressParams = allEgressParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val noIn = inParams.size + ingressParams.size == 0
val noOut = outParams.size + egressParams.size == 0
if (noIn || noOut) {
println(s"Constellation WARNING: $nocName router $i seems to be unused, it will not be generated")
None
} else {
Some(LazyModule(new Router(
routerParams = allRouterParams(i),
preDiplomaticInParams = inParams,
preDiplomaticIngressParams = ingressParams,
outDests = outParams.map(_.destId),
egressIds = egressParams.map(_.egressId)
)(iP)))
}
}}.flatten
val ingressNodes = allIngressParams.map { u => IngressChannelSourceNode(u.destId) }
val egressNodes = allEgressParams.map { u => EgressChannelDestNode(u) }
// Generate channels between routers diplomatically
Seq.tabulate(nNodes, nNodes) { case (i, j) => if (i != j) {
val routerI = routers.find(_.nodeId == i)
val routerJ = routers.find(_.nodeId == j)
if (routerI.isDefined && routerJ.isDefined) {
val sourceNodes: Seq[ChannelSourceNode] = routerI.get.sourceNodes.filter(_.destId == j)
val destNodes: Seq[ChannelDestNode] = routerJ.get.destNodes.filter(_.destParams.srcId == i)
require (sourceNodes.size == destNodes.size)
(sourceNodes zip destNodes).foreach { case (src, dst) =>
val channelParam = allChannelParams.find(c => c.srcId == i && c.destId == j).get
router_sink_domains(j) {
implicit val p: Parameters = iP
(dst
:= ChannelWidthWidget(routerJ.get.payloadBits, routerI.get.payloadBits)
:= channelParam.channelGen(p)(src)
)
}
}
}
}}
// Generate terminal channels diplomatically
routers.foreach { dst => router_sink_domains(dst.nodeId) {
implicit val p: Parameters = iP
dst.ingressNodes.foreach(n => {
val ingressId = n.destParams.ingressId
require(dst.payloadBits <= allIngressParams(ingressId).payloadBits)
(n
:= IngressWidthWidget(dst.payloadBits, allIngressParams(ingressId).payloadBits)
:= ingressNodes(ingressId)
)
})
dst.egressNodes.foreach(n => {
val egressId = n.egressId
require(dst.payloadBits <= allEgressParams(egressId).payloadBits)
(egressNodes(egressId)
:= EgressWidthWidget(allEgressParams(egressId).payloadBits, dst.payloadBits)
:= n
)
})
}}
val debugNodes = routers.map { r =>
val sink = BundleBridgeSink[DebugBundle]()
sink := r.debugNode
sink
}
val ctrlNodes = if (nocParams.hasCtrl) {
(0 until nNodes).map { i =>
routers.find(_.nodeId == i).map { r =>
val sink = BundleBridgeSink[RouterCtrlBundle]()
sink := r.ctrlNode.get
sink
}
}
} else {
Nil
}
println(s"Constellation: $nocName Finished parameter validation")
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
println(s"Constellation: $nocName Starting NoC RTL generation")
val io = IO(new NoCTerminalIO(allIngressParams, allEgressParams)(iP) {
val router_clocks = Vec(nNodes, Input(new ClockBundle(ClockBundleParameters())))
val router_ctrl = if (nocParams.hasCtrl) Vec(nNodes, new RouterCtrlBundle) else Nil
})
(io.ingress zip ingressNodes.map(_.out(0)._1)).foreach { case (l,r) => r <> l }
(io.egress zip egressNodes .map(_.in (0)._1)).foreach { case (l,r) => l <> r }
(io.router_clocks zip clockSourceNodes.map(_.out(0)._1)).foreach { case (l,r) => l <> r }
if (nocParams.hasCtrl) {
ctrlNodes.zipWithIndex.map { case (c,i) =>
if (c.isDefined) {
io.router_ctrl(i) <> c.get.in(0)._1
} else {
io.router_ctrl(i) <> DontCare
}
}
}
// TODO: These assume a single clock-domain across the entire noc
val debug_va_stall_ctr = RegInit(0.U(64.W))
val debug_sa_stall_ctr = RegInit(0.U(64.W))
val debug_any_stall_ctr = debug_va_stall_ctr + debug_sa_stall_ctr
debug_va_stall_ctr := debug_va_stall_ctr + debugNodes.map(_.in(0)._1.va_stall.reduce(_+_)).reduce(_+_)
debug_sa_stall_ctr := debug_sa_stall_ctr + debugNodes.map(_.in(0)._1.sa_stall.reduce(_+_)).reduce(_+_)
dontTouch(debug_va_stall_ctr)
dontTouch(debug_sa_stall_ctr)
dontTouch(debug_any_stall_ctr)
def prepend(s: String) = Seq(nocName, s).mkString(".")
ElaborationArtefacts.add(prepend("noc.graphml"), graphML)
val adjList = routers.map { r =>
val outs = r.outParams.map(o => s"${o.destId}").mkString(" ")
val egresses = r.egressParams.map(e => s"e${e.egressId}").mkString(" ")
val ingresses = r.ingressParams.map(i => s"i${i.ingressId} ${r.nodeId}")
(Seq(s"${r.nodeId} $outs $egresses") ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.adjlist"), adjList)
val xys = routers.map(r => {
val n = r.nodeId
val ids = (Seq(r.nodeId.toString)
++ r.egressParams.map(e => s"e${e.egressId}")
++ r.ingressParams.map(i => s"i${i.ingressId}")
)
val plotter = nocParams.topology.plotter
val coords = (Seq(plotter.node(r.nodeId))
++ Seq.tabulate(r.egressParams.size ) { i => plotter. egress(i, r. egressParams.size, r.nodeId) }
++ Seq.tabulate(r.ingressParams.size) { i => plotter.ingress(i, r.ingressParams.size, r.nodeId) }
)
(ids zip coords).map { case (i, (x, y)) => s"$i $x $y" }.mkString("\n")
}).mkString("\n")
ElaborationArtefacts.add(prepend("noc.xy"), xys)
val edgeProps = routers.map { r =>
val outs = r.outParams.map { o =>
(Seq(s"${r.nodeId} ${o.destId}") ++ (if (o.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val egresses = r.egressParams.map { e =>
(Seq(s"${r.nodeId} e${e.egressId}") ++ (if (e.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val ingresses = r.ingressParams.map { i =>
(Seq(s"i${i.ingressId} ${r.nodeId}") ++ (if (i.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
(outs ++ egresses ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.edgeprops"), edgeProps)
println(s"Constellation: $nocName Finished NoC RTL generation")
}
}
| module TLSplitACDxBENoC_acd_router_15ClockSinkDomain( // @[ClockDomain.scala:14:9]
output [1:0] auto_routers_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
input auto_routers_egress_nodes_out_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_routers_egress_nodes_out_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_routers_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_routers_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_routers_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_routers_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_routers_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_routers_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_dest_nodes_in_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_reset // @[LazyModuleImp.scala:107:25]
);
Router_15 routers ( // @[NoC.scala:67:22]
.clock (auto_clock_in_clock),
.reset (auto_clock_in_reset),
.auto_debug_out_va_stall_0 (auto_routers_debug_out_va_stall_0),
.auto_debug_out_va_stall_1 (auto_routers_debug_out_va_stall_1),
.auto_debug_out_va_stall_2 (auto_routers_debug_out_va_stall_2),
.auto_debug_out_sa_stall_0 (auto_routers_debug_out_sa_stall_0),
.auto_debug_out_sa_stall_1 (auto_routers_debug_out_sa_stall_1),
.auto_debug_out_sa_stall_2 (auto_routers_debug_out_sa_stall_2),
.auto_egress_nodes_out_flit_ready (auto_routers_egress_nodes_out_flit_ready),
.auto_egress_nodes_out_flit_valid (auto_routers_egress_nodes_out_flit_valid),
.auto_egress_nodes_out_flit_bits_head (auto_routers_egress_nodes_out_flit_bits_head),
.auto_egress_nodes_out_flit_bits_tail (auto_routers_egress_nodes_out_flit_bits_tail),
.auto_egress_nodes_out_flit_bits_payload (auto_routers_egress_nodes_out_flit_bits_payload),
.auto_ingress_nodes_in_1_flit_ready (auto_routers_ingress_nodes_in_1_flit_ready),
.auto_ingress_nodes_in_1_flit_valid (auto_routers_ingress_nodes_in_1_flit_valid),
.auto_ingress_nodes_in_1_flit_bits_head (auto_routers_ingress_nodes_in_1_flit_bits_head),
.auto_ingress_nodes_in_1_flit_bits_tail (auto_routers_ingress_nodes_in_1_flit_bits_tail),
.auto_ingress_nodes_in_1_flit_bits_payload (auto_routers_ingress_nodes_in_1_flit_bits_payload),
.auto_ingress_nodes_in_1_flit_bits_egress_id (auto_routers_ingress_nodes_in_1_flit_bits_egress_id),
.auto_ingress_nodes_in_0_flit_ready (auto_routers_ingress_nodes_in_0_flit_ready),
.auto_ingress_nodes_in_0_flit_valid (auto_routers_ingress_nodes_in_0_flit_valid),
.auto_ingress_nodes_in_0_flit_bits_head (auto_routers_ingress_nodes_in_0_flit_bits_head),
.auto_ingress_nodes_in_0_flit_bits_tail (auto_routers_ingress_nodes_in_0_flit_bits_tail),
.auto_ingress_nodes_in_0_flit_bits_payload (auto_routers_ingress_nodes_in_0_flit_bits_payload),
.auto_ingress_nodes_in_0_flit_bits_egress_id (auto_routers_ingress_nodes_in_0_flit_bits_egress_id),
.auto_source_nodes_out_flit_0_valid (auto_routers_source_nodes_out_flit_0_valid),
.auto_source_nodes_out_flit_0_bits_head (auto_routers_source_nodes_out_flit_0_bits_head),
.auto_source_nodes_out_flit_0_bits_tail (auto_routers_source_nodes_out_flit_0_bits_tail),
.auto_source_nodes_out_flit_0_bits_payload (auto_routers_source_nodes_out_flit_0_bits_payload),
.auto_source_nodes_out_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id),
.auto_source_nodes_out_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node),
.auto_source_nodes_out_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id),
.auto_source_nodes_out_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node),
.auto_source_nodes_out_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id),
.auto_source_nodes_out_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_flit_0_bits_virt_channel_id),
.auto_source_nodes_out_credit_return (auto_routers_source_nodes_out_credit_return),
.auto_source_nodes_out_vc_free (auto_routers_source_nodes_out_vc_free),
.auto_dest_nodes_in_flit_0_valid (auto_routers_dest_nodes_in_flit_0_valid),
.auto_dest_nodes_in_flit_0_bits_head (auto_routers_dest_nodes_in_flit_0_bits_head),
.auto_dest_nodes_in_flit_0_bits_tail (auto_routers_dest_nodes_in_flit_0_bits_tail),
.auto_dest_nodes_in_flit_0_bits_payload (auto_routers_dest_nodes_in_flit_0_bits_payload),
.auto_dest_nodes_in_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id),
.auto_dest_nodes_in_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node),
.auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id),
.auto_dest_nodes_in_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node),
.auto_dest_nodes_in_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id),
.auto_dest_nodes_in_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id),
.auto_dest_nodes_in_credit_return (auto_routers_dest_nodes_in_credit_return),
.auto_dest_nodes_in_vc_free (auto_routers_dest_nodes_in_vc_free)
); // @[NoC.scala:67:22]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
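// Illustrative sketch, not part of the original file: `managerNode` and `clientNode` are
// assumed diplomatic endpoints. A single default buffer, or a named chain of buffers, can be
// spliced into a TileLink edge like so:
//
//   managerNode := TLBuffer() := clientNode                                // one default buffer
//   managerNode := TLBuffer.chainNode(2, Some("mem_bufs")) := clientNode   // or a named 2-deep chain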
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
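// Illustrative sketch, not part of the original file: the elided `...` port parameters and
// `beatBytes = 8` are placeholders. The node types above are what endpoints and adapters
// instantiate; a client reaching a manager through an adapter node looks roughly like:
//
//   val client  = TLClientNode(Seq(TLMasterPortParameters.v1(clients = Seq(...))))
//   val manager = TLManagerNode(Seq(TLSlavePortParameters.v1(managers = Seq(...), beatBytes = 8)))
//   manager := TLBuffer() := client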
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
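// Illustrative sketch, not part of the original file: the serial numbers, port indices and
// `tlBundleWire` are assumptions. A Dangle pairs the two HalfEdges of one diplomatic edge with
// the hardware Data that implements it:
//
//   Dangle(source = HalfEdge(serial = 3, index = 0),
//          sink   = HalfEdge(serial = 7, index = 1),
//          flipped = false, name = "out", dataOpt = Some(tlBundleWire))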
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
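// Illustrative sketch, an assumption rather than part of the original file: an adapter-style
// node typically maps parameters one-to-one through its transform functions, roughly
//
//   def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] = p.map(dFn)
//   def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] = p.map(uFn)
//
// whereas source and sink nodes simply return their own fixed parameter sequences.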
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice while resolving `:*=`, `:=*`, etc. operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved to determine how many actual edges they correspond to. We also need to
* build up the ranges of edges which correspond to each binding operator, so that we can apply the correct edge
* parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// resolveStar depends on the node subclass to implement the resolution algorithm.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of outward ports, before port forwarding (e.g. through [[EphemeralNode]]s) is applied.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the [[MixedNode.iPortMapping]] of the node on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports, before port forwarding (e.g. through [[EphemeralNode]]s) is applied.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query the port index range of this binding on the other side of the connection.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a28d64s4k1z3u_1( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [27:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [27:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [3:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [27:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [3:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire [1:0] auto_out_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire [1:0] nodeOut_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [3:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [27:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [27:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [3:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [3:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [3:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [27:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
TLMonitor_60 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a28d64s4k1z3u_1 nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a28d64s4k1z3u_1 nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
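// Example usage (a minimal sketch; `async_in` is an illustrative signal name): a 2-deep
// chain with named stages and a reset value of false:
// {{{
//   val stable = ShiftRegInit(async_in, n = 2, init = false.B, name = Some("sync"))
// }}}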
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
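// For example (illustrative values only), a shallow, narrow crossing:
// {{{
//   val p = AsyncQueueParams(depth = 4, sync = 3, safe = true, narrow = true)
//   // p.bits == 2 (log2Ceil(4)) and p.wires == 1, because the read mux sits on the source side
// }}}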
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
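// The final XOR converts the binary count to Gray code, so successive values differ in
// exactly one bit and can be safely synchronized across a clock boundary. A software model
// of the same conversion (for intuition only):
// {{{
//   def toGray(x: Int): Int = x ^ (x >> 1)
//   // (0 to 4).map(toGray) == Seq(0, 1, 3, 2, 6), i.e. 000, 001, 011, 010, 110
// }}}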
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because a dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that the
// dequeue occurred.
// TODO: write some sort of sanity-check assertion for users
// that denotes "don't reset while there is activity"
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that the bits latch while the source domain is reset / has its power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity-check assertion for users
// that denotes "don't reset while there is activity"
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have a different sync depth than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
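// Typical usage pattern for crossing a Decoupled interface between two clock domains
// (a sketch; `srcClock`, `srcReset`, `dstClock`, `dstReset` and `enq` are illustrative names,
// and each helper must be elaborated in its own clock domain):
// {{{
//   val async = withClockAndReset(srcClock, srcReset) { ToAsyncBundle(enq, AsyncQueueParams()) }
//   val deq   = withClockAndReset(dstClock, dstReset) { FromAsyncBundle(async) }
// }}}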
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_63( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_80 io_out_source_valid ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
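// Example usage (a minimal sketch; `async_bool` is an illustrative signal name): a 3-deep
// asynchronously reset synchronizer chain for a single-bit signal:
// {{{
//   val stable = AsyncResetSynchronizerShiftReg(async_bool, 3, Some("sync"))
// }}}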
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
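// Example usage (a minimal sketch; `bus_bits` and `bits_stable` are illustrative names):
// capture a multi-bit value into the local clock domain only while it is known to be stable:
// {{{
//   val captured = ClockCrossingReg(bus_bits, en = bits_stable, doInit = false, name = Some("cdc_reg"))
// }}}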
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_174( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_306 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
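// The MAC computes out_d = in_a * in_b + in_c, with the result cast to dType. For example
// (illustrative widths; an Arithmetic instance for the element type must be in scope, such
// as the UInt instance defined in Arithmetic.scala):
// {{{
//   val mac = Module(new MacUnit(UInt(8.W), UInt(32.W), UInt(20.W)))
// }}}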
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_53( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
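As a quick check of the datapath in MacUnit_53 above (8-bit signed operands, 32-bit addend, 20-bit result): the Verilog sign-extends a and b, multiplies, adds c, and keeps the low 20 bits, matching io.out_d := io.in_c.mac(io.in_a, io.in_b). A plain-Scala model of that computation is sketched below; the object name and test values are made up for illustration.

// Software model of io_out_d = (io_in_a * io_in_b + io_in_c)[19:0].
object MacUnitModel {
  def mac(a: Byte, b: Byte, c: Int): Int = {
    val full = a.toInt * b.toInt + c   // sign-extended multiply-accumulate
    full & ((1 << 20) - 1)             // truncate to the low 20 bits, as in [19:0]
  }

  def main(args: Array[String]): Unit = {
    println(mac(-3, 5, 100))           // (-3) * 5 + 100 = 85
  }
}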
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
| module TLBuffer_9( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset // @[Buffer.scala:40:9]
);
endmodule |
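The TLBuffer companion object shown in Buffer.scala above is normally used to splice buffering between diplomatic TileLink nodes. A rough usage sketch, using the same imports as Buffer.scala, follows; the wrapper class, node names, and chosen buffer parameters are illustrative, and the exact composition depends on the surrounding design.

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
import freechips.rocketchip.tilelink._

// Hypothetical pass-through that only adds buffering on the TileLink path.
class BufferedPassthrough(implicit p: Parameters) extends LazyModule {
  val in  = TLIdentityNode()
  val out = TLIdentityNode()
  // One flow-through buffer followed by a named two-deep default chain.
  out :=* TLBuffer.chainNode(2, Some("mid")) :=* TLBuffer(BufferParams.flow) :=* in
  lazy val module = new LazyModuleImp(this) { }
}

Masters would connect to in and slaves hang off out; each inserted buffer then adds its latency to minLatency exactly as the clientFn/managerFn of TLBufferNode above describe.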
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_153( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
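Arithmetic.scala above also defines the rounding right shift used by the PE's output-stationary path (the >> shift_offset in PE.scala). A plain-Scala model of the SInt version, following the same point_five/zeros/ones_digit equation, is sketched below; the object name and test values are illustrative only.

// Software model of (self >> u) + r from the SInt ">>" above.
object RoundingShiftModel {
  def roundShift(x: Int, u: Int): Int = {
    if (u == 0) x
    else {
      val pointFive = (x >> (u - 1)) & 1                               // self(u - 1)
      val zeros     = if (u > 1 && (x & ((1 << (u - 1)) - 1)) != 0) 1 else 0
      val onesDigit = (x >> u) & 1                                     // self(u)
      (x >> u) + (pointFive & (zeros | onesDigit))                     // add the rounding bit
    }
  }

  def main(args: Array[String]): Unit = {
    println(roundShift(7, 1))  // 3.5 rounds to 4
    println(roundShift(5, 2))  // 1.25 rounds to 1
  }
}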
Generate the Verilog code corresponding to the following Chisel files.
File Transposer.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait Transposer[T <: Data] extends Module {
def dim: Int
def dataType: T
val io = IO(new Bundle {
val inRow = Flipped(Decoupled(Vec(dim, dataType)))
val outCol = Decoupled(Vec(dim, dataType))
})
}
class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
val sMoveUp :: sMoveLeft :: Nil = Enum(2)
val state = RegInit(sMoveUp)
val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1)
val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1)
io.outCol.valid := 0.U
io.inRow.ready := 0.U
switch(state) {
is(sMoveUp) {
io.inRow.ready := upCounter <= dim.U
io.outCol.valid := leftCounter > 0.U
when(io.inRow.fire) {
upCounter := upCounter + 1.U
}
when(upCounter === (dim-1).U) {
state := sMoveLeft
leftCounter := 0.U
}
when(io.outCol.fire) {
leftCounter := leftCounter - 1.U
}
}
is(sMoveLeft) {
io.inRow.ready := leftCounter <= dim.U // TODO: this is naive
io.outCol.valid := upCounter > 0.U
when(leftCounter === (dim-1).U) {
state := sMoveUp
}
when(io.inRow.fire) {
leftCounter := leftCounter + 1.U
upCounter := 0.U
}
when(io.outCol.fire) {
upCounter := upCounter - 1.U
}
}
}
// Propagate input from bottom row to top row systolically in the move up phase
// TODO: need to iterate over columns to connect Chisel values of type T
// Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?)
for (colIdx <- 0 until dim) {
regArray.foldRight(io.inRow.bits(colIdx)) {
case (regRow, prevReg) =>
when (state === sMoveUp) {
regRow(colIdx) := prevReg
}
regRow(colIdx)
}
}
// Propagate input from right side to left side systolically in the move left phase
for (rowIdx <- 0 until dim) {
regArrayT.foldRight(io.inRow.bits(rowIdx)) {
case (regCol, prevReg) =>
when (state === sMoveLeft) {
regCol(rowIdx) := prevReg
}
regCol(rowIdx)
}
}
// Pull from the left side or the top side based on the state
for (idx <- 0 until dim) {
when (state === sMoveUp) {
io.outCol.bits(idx) := regArray(0)(idx)
}.elsewhen(state === sMoveLeft) {
io.outCol.bits(idx) := regArrayT(0)(idx)
}.otherwise {
io.outCol.bits(idx) := DontCare
}
}
}
class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val LEFT_DIR = 0.U(1.W)
val UP_DIR = 1.U(1.W)
class PE extends Module {
val io = IO(new Bundle {
val inR = Input(dataType)
val inD = Input(dataType)
val outL = Output(dataType)
val outU = Output(dataType)
val dir = Input(UInt(1.W))
val en = Input(Bool())
})
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en)
io.outU := reg
io.outL := reg
}
val pes = Seq.fill(dim,dim)(Module(new PE))
val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter
val dir = RegInit(LEFT_DIR)
// Wire up horizontal signals
for (row <- 0 until dim; col <- 0 until dim) {
val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL
pes(row)(col).io.inR := right_in
}
// Wire up vertical signals
for (row <- 0 until dim; col <- 0 until dim) {
val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU
pes(row)(col).io.inD := down_in
}
// Wire up global signals
pes.flatten.foreach(_.io.dir := dir)
pes.flatten.foreach(_.io.en := io.inRow.fire)
io.outCol.valid := true.B
io.inRow.ready := true.B
val left_out = VecInit(pes.transpose.head.map(_.io.outL))
val up_out = VecInit(pes.head.map(_.io.outU))
io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out)
when (io.inRow.fire) {
counter := wrappingAdd(counter, 1.U, dim)
}
when (counter === (dim-1).U && io.inRow.fire) {
dir := ~dir
}
}
class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
// state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise
val state = RegInit(0.U(1.W))
val countInc = io.inRow.fire || io.outCol.fire
val (countValue, countWrap) = Counter(countInc, dim)
io.inRow.ready := state === 0.U
io.outCol.valid := state === 1.U
for (i <- 0 until dim) {
for (j <- 0 until dim) {
when(countValue === i.U && io.inRow.fire) {
regArray(i)(j) := io.inRow.bits(j)
}
}
}
for (i <- 0 until dim) {
io.outCol.bits(i) := 0.U
for (j <- 0 until dim) {
when(countValue === j.U) {
io.outCol.bits(i) := regArrayT(j)(i)
}
}
}
when (io.inRow.fire && countWrap) {
state := 1.U
}
when (io.outCol.fire && countWrap) {
state := 0.U
}
assert(!(state === 0.U) || !io.outCol.fire)
assert(!(state === 1.U) || !io.inRow.fire)
}
| module PE_176( // @[Transposer.scala:100:9]
input clock, // @[Transposer.scala:100:9]
input reset, // @[Transposer.scala:100:9]
input [7:0] io_inR, // @[Transposer.scala:101:16]
input [7:0] io_inD, // @[Transposer.scala:101:16]
output [7:0] io_outL, // @[Transposer.scala:101:16]
output [7:0] io_outU, // @[Transposer.scala:101:16]
input io_dir, // @[Transposer.scala:101:16]
input io_en // @[Transposer.scala:101:16]
);
wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
wire io_en_0 = io_en; // @[Transposer.scala:100:9]
wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
reg [7:0] reg_0; // @[Transposer.scala:110:24]
assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
always @(posedge clock) begin // @[Transposer.scala:100:9]
if (io_en_0) // @[Transposer.scala:100:9]
reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
end // always @(posedge)
assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule |
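The PE above is one cell of the AlwaysOutTransposer grid in Transposer.scala: it latches either its right-hand or its lower neighbour depending on dir. A minimal sketch of instantiating and driving the transposer itself is given below; the wrapper name and the 4x4 byte geometry are illustrative, and it assumes the gemmini package is on the classpath.

import chisel3._
import chisel3.util._
import gemmini.AlwaysOutTransposer

// Hypothetical wrapper streaming 4x4 byte matrices through the transposer.
class TransposerWrapper extends Module {
  val io = IO(new Bundle {
    val inRow  = Flipped(Decoupled(Vec(4, UInt(8.W))))
    val outCol = Decoupled(Vec(4, UInt(8.W)))
  })
  val t = Module(new AlwaysOutTransposer(4, UInt(8.W)))
  t.io.inRow <> io.inRow     // one row enters per fire; dir flips every 4 rows
  io.outCol  <> t.io.outCol  // columns of the previously loaded matrix stream out
}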
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
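// Worked example of the packing used below (illustrative numbers): with a 4-bit size field,
// a_size_bus_size = 5 and an accepted request of size 3 is stored per source as
// (3 << 1) | 1 = "b0111".U, where bit 0 marks the entry as in flight and the upper bits hold the
// size; size_to_numfullbits(3.U) = "b111".U is the kind of per-entry mask used by the lookups.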
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
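// Rough arithmetic behind the bound (illustrative, assuming a typical 4-bit size field):
// with 2^14 source IDs the shadow state is about 2^14 bits of inflight flags plus
// 2^14 << 2 bits of packed opcodes plus 2^14 << 3 bits of packed sizes, i.e. well over 16kB.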
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
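// Usage sketch (assumed wiring, based on the TLMonitor.apply helper defined above): when
// monitoring is enabled, a diplomatic node can be wrapped so an ephemeral monitor edge observes
// its traffic, e.g. for some assumed TLNode `myNode` with an implicit Parameters in scope:
//   val monitoredNode: TLNode = TLMonitor(enable = true, myNode)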
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
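// Usage sketch (illustrative, assuming Decoupled ports `in` and `out` plus a `busy` flag):
//   val helper = DecoupledHelper(in.valid, out.ready, !busy)
//   out.valid := helper.fire(out.ready)   // all terms except out.ready itself
//   in.ready  := helper.fire(in.valid)    // all terms except in.valid itself
// fire(exclude) requires the excluded Bool to be one of the constructor arguments (by reference).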
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
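// Worked examples from the definitions above: Str("OK") packs ASCII bytes MSB-first into
// 0x4F4B.U(16.W); Str(x, 16) of a UInt renders hexadecimal digits via the digit() helper,
// using 'a'-'f' for values 10-15.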
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
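// Worked example: Majority(Seq(a, b, c)) expands to (a && b) || (a && c) || (b && c),
// i.e. the OR of all 2-element subsets, which is true exactly when at least two inputs are set.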
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
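// Another worked example in the style of the comment above: MaskGen(0x2.U, 1.U, 4) = "b1100".U,
// i.e. a 2-byte access at byte offset 2 of a 4-byte beat selects the upper two byte lanes;
// with groupBy = 2 the same access would reduce to "b10".U.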
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
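// Usage sketch (mirrors the watchdog in Monitor.scala above): read a runtime-tunable limit and
// guard an assumed UInt counter `cycleCount` in the enclosing module, e.g.
//   val limit = PlusArg("tilelink_timeout",
//     docstring = "Kill emulation after INT waiting TileLink cycles. Off if 0.")
//   PlusArg.timeout("my_watchdog", default = 0, docstring = "illustrative timeout")(cycleCount)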
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
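// Worked examples of the rotations above: Seq(a, b, c, d).rotate(1) == Seq(b, c, d, a) and
// Seq(a, b, c, d).rotateRight(1) == Seq(d, a, b, c); the UInt-amount variants build the same
// result from log2Ceil(size) Mux stages and therefore require a power-of-two length.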
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
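    // Worked example (illustrative): with n = 2.S(3.W), x << n shifts x left by 2; with
    // n = (-2).S(3.W), x << n shifts x right by 2 (and x >> n is the mirror image).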
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
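    // Worked examples (illustrative): with n = 6, 5.U.addWrap(4.U, 6) computes z = 9 >= 6
    // and returns 3 = (5 + 4) % 6; 1.U.subWrap(4.U, 6) sees z underflow (MSB set) and
    // returns z + 6 = 3 = (1 - 4) mod 6.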
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
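  // OH1 ("thermometer") encoding: UIntToOH1(k, w) sets the k lowest of w bits (e.g. k = 2,
  // w = 4 gives 0b0011); OH1ToOH converts that to the one-hot 1 << k, and OH1ToUInt
  // recovers k in binary.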
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
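  // Worked examples (illustrative): leftOR("b00100".U(5.W)) yields "b11100" (each set bit
  // propagates toward the MSB), while rightOR("b00100".U(5.W)) yields "b00111".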
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
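  // Worked example (illustrative): groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) returns
  // Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4)); keys appear in first-encounter order, so
  // generated hardware does not depend on unordered Map iteration.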
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 *   (B)ranch: the agent is on an outwards path from the Trunk and holds read-only permissions.
 *   (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
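// An illustrative, typical permission flow (one common sequence, not an exhaustive list of
// legal transitions):
//   Acquire(NtoT)  -> Grant(toT)              grows a client from None to Trunk
//   Probe(toN)     -> ProbeAck[Data](TtoN)    shrinks it back to None on demand
//   Release(TtoN)  -> ReleaseAck              voluntarily shrinks Trunk to None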
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
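  // Worked example (illustrative): for IdRange(4, 7), start = 0b100 and end-1 = 0b110, so
  // largestDeltaBit = 1 and smallestCommonBit = 2; x matches iff x >> 2 equals 1 and its
  // low two bits lie in [0, 2], i.e. x is 4, 5 or 6.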
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
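  // Worked example (illustrative): TransferSizes(4, 8) mincover TransferSizes(32, 64)
  // is TransferSizes(4, 64), which also admits 16 even though neither operand does.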
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
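// Worked examples (illustrative):
//   AddressSet(0x1000, 0xfff) contains every address in 0x1000-0x1fff;
//   AddressSet(0x1000, 0xff).widen(0xf00) == AddressSet(0x1000, 0xfff);
//   AddressSet(0x0, 0xff) subtract AddressSet(0x0, 0x3f) == Seq(AddressSet(0x40, 0x3f), AddressSet(0x80, 0x7f)).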
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
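  // Worked example (illustrative): a 32-byte (2^5) PutFullData on an 8-byte bus has
  // beats1 = 3; over the four accepted beats the counter steps 0 -> 3 -> 2 -> 1, 'first'
  // holds only on beat one, 'last'/'done' on beat four, and 'count' reports 0, 1, 2, 3.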
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_11( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [4:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [4:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
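  // The monitored edge carries no C-channel traffic (and defines no D-channel sink IDs), so the C-side bookkeeping below is constant-folded to zeros by the generator.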
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27]
wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_31 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_43 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_49 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28]
wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_first_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_first_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_first_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_first_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40]
wire [4:0] _c_set_wo_ready_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_set_wo_ready_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_opcodes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_opcodes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_sizes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_sizes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51]
wire [4:0] _c_opcodes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_opcodes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_sizes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_sizes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_probe_ack_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_probe_ack_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_probe_ack_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_probe_ack_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_4_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_5_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [259:0] _c_sizes_set_T_1 = 260'h0; // @[Monitor.scala:768:52]
wire [7:0] _c_opcodes_set_T = 8'h0; // @[Monitor.scala:767:79]
wire [7:0] _c_sizes_set_T = 8'h0; // @[Monitor.scala:768:77]
wire [258:0] _c_opcodes_set_T_1 = 259'h0; // @[Monitor.scala:767:54]
wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [31:0] _c_set_wo_ready_T = 32'h1; // @[OneHot.scala:58:35]
wire [31:0] _c_set_T = 32'h1; // @[OneHot.scala:58:35]
wire [135:0] c_sizes_set = 136'h0; // @[Monitor.scala:741:34]
wire [67:0] c_opcodes_set = 68'h0; // @[Monitor.scala:740:34]
wire [16:0] c_set = 17'h0; // @[Monitor.scala:738:34]
wire [16:0] c_set_wo_ready = 17'h0; // @[Monitor.scala:739:34]
wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [4:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
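  // A-channel source-ID legality: the request's source must fall within one of the client ID ranges visible on this edge.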
wire _source_ok_T = io_in_a_bits_source_0 == 5'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _source_ok_T_1 = io_in_a_bits_source_0[4:2]; // @[Monitor.scala:36:7]
wire [2:0] _source_ok_T_7 = io_in_a_bits_source_0[4:2]; // @[Monitor.scala:36:7]
wire [2:0] _source_ok_T_13 = io_in_a_bits_source_0[4:2]; // @[Monitor.scala:36:7]
wire [2:0] _source_ok_T_19 = io_in_a_bits_source_0[4:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 3'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 3'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 3'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 3'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_26 = _source_ok_T_25 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_27 = _source_ok_T_26 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_27 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
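  // Size-derived mask, reused for the address-alignment check (address bits below the transfer size must be zero) and for burst beat counting.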
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
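  // Recompute the byte-lane mask implied by address and size, for comparison against a.mask.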
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_28 = io_in_d_bits_source_0 == 5'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_28; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _source_ok_T_29 = io_in_d_bits_source_0[4:2]; // @[Monitor.scala:36:7]
wire [2:0] _source_ok_T_35 = io_in_d_bits_source_0[4:2]; // @[Monitor.scala:36:7]
wire [2:0] _source_ok_T_41 = io_in_d_bits_source_0[4:2]; // @[Monitor.scala:36:7]
wire [2:0] _source_ok_T_47 = io_in_d_bits_source_0[4:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_30 = _source_ok_T_29 == 3'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_32 = _source_ok_T_30; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_34; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_36 = _source_ok_T_35 == 3'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_40; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_42 = _source_ok_T_41 == 3'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_44 = _source_ok_T_42; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_46; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_48 = _source_ok_T_47 == 3'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_50 = _source_ok_T_48; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_52; // @[Parameters.scala:1138:31]
wire _source_ok_T_53 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_54 = _source_ok_T_53 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_55 = _source_ok_T_54 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_55 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
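  // A-channel beat tracking: a_first marks the first beat of a burst, when the header fields below are sampled.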
wire _T_1361 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1361; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1361; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [4:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
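  // D-channel beat tracking, mirroring the A side; response header fields are sampled on the first beat.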
wire _T_1434 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1434; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1434; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1434; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [4:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
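  // Outstanding-transaction state: one inflight bit per source ID plus the opcode and size of each pending request, so every D response can be matched against an A request.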
reg [16:0] inflight; // @[Monitor.scala:614:27]
reg [67:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [135:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [16:0] a_set; // @[Monitor.scala:626:34]
wire [16:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [67:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [135:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [7:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [7:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [7:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [7:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [7:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [67:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [67:0] _a_opcode_lookup_T_6 = {64'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [67:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[67:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [7:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [7:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65]
wire [7:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99]
wire [7:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67]
wire [7:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99]
wire [135:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [135:0] _a_size_lookup_T_6 = {128'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}]
wire [135:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[135:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [31:0] _GEN_3 = 32'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [31:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35]
wire [31:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_3; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[16:0] : 17'h0; // @[OneHot.scala:58:35]
wire _T_1287 = _T_1361 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1287 ? _a_set_T[16:0] : 17'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1287 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1287 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [7:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [258:0] _a_opcodes_set_T_1 = {255'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1287 ? _a_opcodes_set_T_1[67:0] : 68'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [7:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [259:0] _a_sizes_set_T_1 = {255'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1287 ? _a_sizes_set_T_1[135:0] : 136'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [16:0] d_clr; // @[Monitor.scala:664:34]
wire [16:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [67:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [135:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1333 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [31:0] _GEN_5 = 32'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1333 & ~d_release_ack ? _d_clr_wo_ready_T[16:0] : 17'h0; // @[OneHot.scala:58:35]
wire _T_1302 = _T_1434 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1302 ? _d_clr_T[16:0] : 17'h0; // @[OneHot.scala:58:35]
wire [270:0] _d_opcodes_clr_T_5 = 271'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1302 ? _d_opcodes_clr_T_5[67:0] : 68'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [270:0] _d_sizes_clr_T_5 = 271'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1302 ? _d_sizes_clr_T_5[135:0] : 136'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [16:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [16:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [16:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [67:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [67:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [67:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [135:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [135:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [135:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [16:0] inflight_1; // @[Monitor.scala:726:35]
wire [16:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [67:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [67:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [135:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [135:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [67:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [67:0] _c_opcode_lookup_T_6 = {64'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [67:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[67:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [135:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [135:0] _c_size_lookup_T_6 = {128'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}]
wire [135:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[135:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [16:0] d_clr_1; // @[Monitor.scala:774:34]
wire [16:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [67:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [135:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1405 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1405 & d_release_ack_1 ? _d_clr_wo_ready_T_1[16:0] : 17'h0; // @[OneHot.scala:58:35]
wire _T_1387 = _T_1434 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1387 ? _d_clr_T_1[16:0] : 17'h0; // @[OneHot.scala:58:35]
wire [270:0] _d_opcodes_clr_T_11 = 271'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1387 ? _d_opcodes_clr_T_11[67:0] : 68'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [270:0] _d_sizes_clr_T_11 = 271'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1387 ? _d_sizes_clr_T_11[135:0] : 136'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 5'h0; // @[Monitor.scala:36:7, :795:113]
wire [16:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [16:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [67:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [67:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [135:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [135:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
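// When `enable` is true, this splices a TLEphemeralNode (named "monitor") in front of `node`
// inside an EnableMonitors block, so a TLMonitor is attached to that connection; when false,
// `node` is returned unchanged.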
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
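// An (address, source) pair is "visible" when every client that could own that source ID
// declares the address inside at least one of its visibility regions.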
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs. acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
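// e.g. size_to_numfullbits(4.U) = 15 (0b1111): used below to build the per-source field mask
// of a_opcode_bus_size / a_size_bus_size bits.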
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
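// Both tables are indexed by the A-channel opcode (TLMessages encoding):
// PutFullData/PutPartialData -> AccessAck, ArithmeticData/LogicalData/Get -> AccessAckData,
// Hint -> HintAck, AcquireBlock/AcquirePerm -> Grant; the second table additionally allows
// GrantData as a response to AcquireBlock.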
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
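// The E channel is always a single beat, so every accepted E beat is treated as a first beat.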
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
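// Usage sketch (signal names are hypothetical): AND several ready/valid terms together while
// excluding the term currently being driven, which avoids combinational loops:
// val helper = DecoupledHelper(in.valid, out.ready, resourceFree)
// out.valid := helper.fire(out.ready) // AND of all terms except out.ready
// in.ready := helper.fire(in.valid) // AND of all terms except in.valid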
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
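// Illustrative sketch (added; not part of the original library): drives MaskGen for an
// 8-byte beat. With beatBytes = 8, (addr = 0x0, lgSize = 3) yields mask 0xff and
// (addr = 0x4, lgSize = 2) yields 0xf0 (byte lanes 7..4). `MaskGenExample` is hypothetical.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(3.W))
    val lgSize = Input(UInt(2.W))
    val mask   = Output(UInt(8.W))
  })
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 8)
}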
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
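// Illustrative sketch (added; not in the original source): typical uses inside a module body,
// where `cycles` is a hypothetical cycle counter register.
//
//   val verbose = PlusArg("verbose", default = 0, docstring = "enable verbose prints") =/= 0.U
//   PlusArg.timeout("max_cycles", default = 0, docstring = "kill the sim after N cycles")(cycles)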
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
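// Illustrative examples (added; not in the original source). With this implicit class in
// scope, hardware code can write, for a hypothetical 8-bit value `x` and pointer `ptr`:
//
//   val wide   = x.sextTo(16)        // sign-extend to 16 bits
//   val turned = x.rotateLeft(ptr)   // dynamic rotate by a UInt amount
//   val next   = ptr.addWrap(1.U, 5) // (ptr + 1) % 5, assuming ptr < 5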
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
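// Illustrative example (added; not in the original source): 1s propagate away from each set bit.
//
//   leftOR("b00100".U(5.W))   // "b11100".U : fill toward the high bits
//   rightOR("b00100".U(5.W))  // "b00111".U : fill toward the low bits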
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
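// Illustrative example (added): unlike Seq.groupBy, the result order is deterministic and
// follows the first appearance of each key.
//
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) // Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4))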
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on the inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the Trunk and may hold a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request that they be grown or shrunk,
* or request a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
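// Illustrative example (added; not in the original source): decoding a C-channel param from a
// hypothetical TLBundleC value `c`.
//
//   val dropsToN = TLPermissions.isShrink(c.param) &&
//                  (c.param === TLPermissions.TtoN || c.param === TLPermissions.BtoN)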
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return the most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range [start, end); may be empty (start == end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
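// Illustrative example (added; not in the original source): the half-open [start, end)
// semantics for two hypothetical ranges.
//
//   val lo = IdRange(0, 4)  // ids 0, 1, 2, 3
//   val hi = IdRange(4, 8)  // ids 4, 5, 6, 7
//   lo.contains(3)          // true
//   lo.overlaps(hi)         // false: the ranges only touch, they do not overlap
//   lo.shift(4) == hi       // true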
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
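// Illustrative example (added; not in the original source): intersect keeps only the sizes
// both terms support, while mincover is the smallest range covering both (and may therefore
// contain sizes supported by neither term alone).
//
//   TransferSizes(4, 64) intersect TransferSizes(16, 256) // TransferSizes(16, 64)
//   TransferSizes(1, 4)  mincover  TransferSizes(64, 64)  // TransferSizes(1, 64)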
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
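// Illustrative example (added; not in the original source): base/mask semantics and helpers.
//
//   val dev = AddressSet(0x1000, 0xfff)  // covers 0x1000 - 0x1fff
//   dev.contains(BigInt(0x1abc))         // true
//   dev.alignment                        // 0x1000: the region is 4 KiB aligned
//   AddressSet.misaligned(0x0, 0x3000)   // Seq(AddressSet(0x0, 0x1fff), AddressSet(0x2000, 0xfff))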
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
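// Illustrative sketch (added; not in the original source): applying BufferParams to a
// hypothetical DecoupledIO channel `chan` inside a module body.
//
//   val buffered = BufferParams.default(chan) // inserts a two-entry Queue
//   val bypassed = BufferParams.none(chan)    // depth 0: returns `chan` unchanged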
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
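// Illustrative sketch (added; not in the original source): the firstlast helpers are the usual
// way to track multibeat messages. For a hypothetical edge `edge` and TLBundle `bundle`:
//
//   val (a_first, a_last, a_done) = edge.firstlast(bundle.a)
//   when (bundle.a.fire && a_first) { /* latch per-transaction state */ }
//   when (bundle.a.fire && a_done)  { /* final beat accepted */ }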
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
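  // Arithmetic / Logical build A-channel atomic (AMO) requests; `atomic` selects the
  // operation and is carried in the param field.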
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
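  // AccessAck / HintAck here are C-channel acknowledgements a client returns for
  // B-channel accesses that were forwarded to it.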
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
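// Illustrative usage sketch (editorial, not part of the original sources): a client
// typically builds its A-channel requests with the helpers above, e.g.
//   val (legal, get) = edgeOut.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   when (legal) { out.a.valid := true.B; out.a.bits := get }
// where `edgeOut`, `addr`, and `out` are assumed handles (the edge, an address, and a
// TL client port) available in the enclosing module.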
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
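  // Probe: manager-initiated B-channel request asking a client to cap its permissions
  // on a block; legality is checked against what the addressed client supports.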
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
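  // Grant / GrantData: D-channel responses to an Acquire; `denied` (and `corrupt` on the
  // data variant) report errors back to the requesting client.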
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
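  // ReleaseAck: D-channel acknowledgement of a voluntary Release; the sink field is unused.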
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
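  // The B-channel Put/Arithmetic/Logical/Hint helpers below mirror their A-channel
  // counterparts, but target a client source id instead of an address-routed manager.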
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
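  // D-channel AccessAck / AccessAckData / HintAck acknowledge A-channel accesses from the
  // manager side; `denied` signals that the access failed.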
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_42( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [9:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [9:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
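  // Editorial note: this monitored edge exposes only the A and D channels, so much of the
  // C-channel bookkeeping below is constant-folded to tied-off wires by the generator.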
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [9:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [9:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_47 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_87 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_89 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_93 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_95 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_99 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_101 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_105 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_107 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_111 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_113 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_117 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_119 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_123 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_125 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_129 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_131 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_165 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_167 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_171 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_173 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_177 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_179 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_183 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_185 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_189 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_191 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_195 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_197 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_201 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_203 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_207 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_209 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_213 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_215 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_219 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_221 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_225 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_227 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_231 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_233 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_237 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_239 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_243 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_245 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_249 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_251 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_255 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_257 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_261 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_263 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_267 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_269 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_273 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_275 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_279 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_281 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_285 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_287 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_291 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_293 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_first_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_first_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_first_WIRE_2_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_first_WIRE_3_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_set_wo_ready_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_set_wo_ready_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_set_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_set_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_opcodes_set_interm_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_opcodes_set_interm_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_sizes_set_interm_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_sizes_set_interm_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_opcodes_set_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_opcodes_set_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_sizes_set_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_sizes_set_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_probe_ack_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_probe_ack_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _c_probe_ack_WIRE_2_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _c_probe_ack_WIRE_3_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _same_cycle_resp_WIRE_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _same_cycle_resp_WIRE_1_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _same_cycle_resp_WIRE_2_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _same_cycle_resp_WIRE_3_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [9:0] _same_cycle_resp_WIRE_4_bits_source = 10'h0; // @[Bundles.scala:265:74]
wire [9:0] _same_cycle_resp_WIRE_5_bits_source = 10'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [8194:0] _c_opcodes_set_T_1 = 8195'h0; // @[Monitor.scala:767:54]
wire [8194:0] _c_sizes_set_T_1 = 8195'h0; // @[Monitor.scala:768:52]
wire [12:0] _c_opcodes_set_T = 13'h0; // @[Monitor.scala:767:79]
wire [12:0] _c_sizes_set_T = 13'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [1023:0] _c_set_wo_ready_T = 1024'h1; // @[OneHot.scala:58:35]
wire [1023:0] _c_set_T = 1024'h1; // @[OneHot.scala:58:35]
wire [2051:0] c_opcodes_set = 2052'h0; // @[Monitor.scala:740:34]
wire [2051:0] c_sizes_set = 2052'h0; // @[Monitor.scala:741:34]
wire [512:0] c_set = 513'h0; // @[Monitor.scala:738:34]
wire [512:0] c_set_wo_ready = 513'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [9:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_66 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_67 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_68 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_69 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_70 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_71 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_72 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_73 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_74 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_75 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_76 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_77 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_78 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_79 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_80 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_81 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_82 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_83 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_84 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_85 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_86 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_87 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_88 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_89 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_90 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_91 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_92 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_93 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_94 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_95 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_96 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_97 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_98 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_99 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_100 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_101 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_102 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_103 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_104 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_105 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_106 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_107 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_108 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_109 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_110 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_111 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_112 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_113 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_114 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_115 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_116 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_117 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_118 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_119 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_120 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_121 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_122 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_123 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_124 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_125 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_126 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_127 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_128 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_129 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_130 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_131 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_132 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_133 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_134 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_135 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_136 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_137 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_138 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_139 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_140 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_141 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_142 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_143 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_144 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_145 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_146 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_147 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_148 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_149 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_150 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_151 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_152 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_153 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_154 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_155 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_156 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_157 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_158 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_159 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_160 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_161 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_162 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_163 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_164 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_165 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_166 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_167 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_168 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_169 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_170 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_171 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_172 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_173 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_174 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_175 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_176 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_177 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_178 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_179 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_180 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_181 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_182 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_183 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_184 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_185 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_186 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_187 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_188 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_189 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_190 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_191 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_192 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_193 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_194 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_195 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_196 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_197 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_198 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_199 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_200 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_201 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_202 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_203 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_204 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_205 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_206 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_207 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_208 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_209 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_210 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_211 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_212 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_213 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_214 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_215 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_216 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_217 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_218 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_219 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_220 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_221 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_222 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_223 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_224 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_225 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_226 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_227 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_228 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_229 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_230 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_231 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_232 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_233 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_234 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_235 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_236 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_237 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_238 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_239 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_240 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _uncommonBits_T_241 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_22 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_23 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_24 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_25 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_26 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_27 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_28 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_29 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_30 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_31 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_32 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_33 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_34 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_35 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_36 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_37 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_38 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_39 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_40 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_41 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_42 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [9:0] _source_ok_uncommonBits_T_43 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
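  // Legality check for the A-channel source ID: the comparisons below match
  // io_in_a_bits_source against the individual IDs and ID ranges that the
  // monitored client parameters allow (see the Parameters.scala locators), and
  // the per-range hits are OR-reduced into source_ok further down.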
wire _source_ok_T = io_in_a_bits_source_0 == 10'h1D0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [7:0] _source_ok_T_1 = io_in_a_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_7 = io_in_a_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_13 = io_in_a_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_19 = io_in_a_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_25 = io_in_a_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_31 = io_in_a_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_73 = io_in_a_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_79 = io_in_a_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 8'h70; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 8'h71; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 8'h72; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 8'h73; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_26 = _source_ok_T_25 == 8'h7C; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_32 = _source_ok_T_31 == 8'h7B; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_6 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_37 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_43 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_49 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_55 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_61 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_67 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_85 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_91 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_97 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_103 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_109 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_115 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_121 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_127 = io_in_a_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire _source_ok_T_38 = _source_ok_T_37 == 5'hD; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_7 = _source_ok_T_42; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_44 = _source_ok_T_43 == 5'hC; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_48 = _source_ok_T_46; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_8 = _source_ok_T_48; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_50 = _source_ok_T_49 == 5'hB; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_9 = _source_ok_T_54; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_56 = _source_ok_T_55 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_10 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_62 = _source_ok_T_61 == 5'h9; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_11 = _source_ok_T_66; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_68 = _source_ok_T_67 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_12 = _source_ok_T_72; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_12 = _source_ok_uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_74 = _source_ok_T_73 == 8'h7A; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_13 = _source_ok_T_78; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_13 = _source_ok_uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_80 = _source_ok_T_79 == 8'h79; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_14 = _source_ok_T_84; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_14 = _source_ok_uncommonBits_T_14[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_86 = _source_ok_T_85 == 5'h7; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_88 = _source_ok_T_86; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_90 = _source_ok_T_88; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_15 = _source_ok_T_90; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_15 = _source_ok_uncommonBits_T_15[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_92 = _source_ok_T_91 == 5'h6; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_94 = _source_ok_T_92; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_96 = _source_ok_T_94; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_16 = _source_ok_T_96; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_16 = _source_ok_uncommonBits_T_16[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_98 = _source_ok_T_97 == 5'h5; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_100 = _source_ok_T_98; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_102 = _source_ok_T_100; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_17 = _source_ok_T_102; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_17 = _source_ok_uncommonBits_T_17[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_104 = _source_ok_T_103 == 5'h4; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_106 = _source_ok_T_104; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_108 = _source_ok_T_106; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_18 = _source_ok_T_108; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_18 = _source_ok_uncommonBits_T_18[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_110 = _source_ok_T_109 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_112 = _source_ok_T_110; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_114 = _source_ok_T_112; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_19 = _source_ok_T_114; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_19 = _source_ok_uncommonBits_T_19[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_116 = _source_ok_T_115 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_118 = _source_ok_T_116; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_120 = _source_ok_T_118; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_20 = _source_ok_T_120; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_20 = _source_ok_uncommonBits_T_20[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_122 = _source_ok_T_121 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_124 = _source_ok_T_122; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_126 = _source_ok_T_124; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_21 = _source_ok_T_126; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_21 = _source_ok_uncommonBits_T_21[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_128 = _source_ok_T_127 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_130 = _source_ok_T_128; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_132 = _source_ok_T_130; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_22 = _source_ok_T_132; // @[Parameters.scala:1138:31]
wire _source_ok_T_133 = io_in_a_bits_source_0 == 10'h1E0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_23 = _source_ok_T_133; // @[Parameters.scala:1138:31]
wire _source_ok_T_134 = io_in_a_bits_source_0 == 10'h1E1; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_24 = _source_ok_T_134; // @[Parameters.scala:1138:31]
wire _source_ok_T_135 = io_in_a_bits_source_0 == 10'h1E2; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_25 = _source_ok_T_135; // @[Parameters.scala:1138:31]
wire _source_ok_T_136 = io_in_a_bits_source_0 == 10'h200; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_26 = _source_ok_T_136; // @[Parameters.scala:1138:31]
wire _source_ok_T_137 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_138 = _source_ok_T_137 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_139 = _source_ok_T_138 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_140 = _source_ok_T_139 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_141 = _source_ok_T_140 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_142 = _source_ok_T_141 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_143 = _source_ok_T_142 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_144 = _source_ok_T_143 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_145 = _source_ok_T_144 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_146 = _source_ok_T_145 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_147 = _source_ok_T_146 | _source_ok_WIRE_11; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_148 = _source_ok_T_147 | _source_ok_WIRE_12; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_149 = _source_ok_T_148 | _source_ok_WIRE_13; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_150 = _source_ok_T_149 | _source_ok_WIRE_14; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_151 = _source_ok_T_150 | _source_ok_WIRE_15; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_152 = _source_ok_T_151 | _source_ok_WIRE_16; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_153 = _source_ok_T_152 | _source_ok_WIRE_17; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_154 = _source_ok_T_153 | _source_ok_WIRE_18; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_155 = _source_ok_T_154 | _source_ok_WIRE_19; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_156 = _source_ok_T_155 | _source_ok_WIRE_20; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_157 = _source_ok_T_156 | _source_ok_WIRE_21; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_158 = _source_ok_T_157 | _source_ok_WIRE_22; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_159 = _source_ok_T_158 | _source_ok_WIRE_23; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_160 = _source_ok_T_159 | _source_ok_WIRE_24; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_161 = _source_ok_T_160 | _source_ok_WIRE_25; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_161 | _source_ok_WIRE_26; // @[Parameters.scala:1138:31, :1139:46]
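  // Address-alignment check: the transfer size is expanded into a low-order byte
  // mask, and the request is aligned when the masked address bits are all zero
  // (package.scala / Edges.scala locators).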
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [27:0] _is_aligned_T = {22'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}]
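  // Byte-lane mask generation (Misc.scala): the 8-byte lane vector is split
  // recursively on each address bit, and a lane is enabled when the transfer
  // size covers it; the per-lane accumulators are concatenated into mask below.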
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
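  // The uncommonBits_* wires below appear to re-slice the low bits of the
  // A-channel source ID once per advertised ID range; the generator keeps one
  // copy per range rather than sharing a single slice.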
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_6 = _uncommonBits_T_6[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_7 = _uncommonBits_T_7[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_8 = _uncommonBits_T_8[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_9 = _uncommonBits_T_9[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_10 = _uncommonBits_T_10[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_11 = _uncommonBits_T_11[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_14 = _uncommonBits_T_14[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_15 = _uncommonBits_T_15[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_16 = _uncommonBits_T_16[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_17 = _uncommonBits_T_17[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_18 = _uncommonBits_T_18[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_19 = _uncommonBits_T_19[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_20 = _uncommonBits_T_20[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_21 = _uncommonBits_T_21[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_28 = _uncommonBits_T_28[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_29 = _uncommonBits_T_29[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_30 = _uncommonBits_T_30[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_31 = _uncommonBits_T_31[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_32 = _uncommonBits_T_32[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_33 = _uncommonBits_T_33[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_36 = _uncommonBits_T_36[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_37 = _uncommonBits_T_37[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_38 = _uncommonBits_T_38[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_39 = _uncommonBits_T_39[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_40 = _uncommonBits_T_40[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_41 = _uncommonBits_T_41[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_42 = _uncommonBits_T_42[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_43 = _uncommonBits_T_43[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_50 = _uncommonBits_T_50[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_51 = _uncommonBits_T_51[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_52 = _uncommonBits_T_52[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_53 = _uncommonBits_T_53[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_54 = _uncommonBits_T_54[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_55 = _uncommonBits_T_55[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_58 = _uncommonBits_T_58[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_59 = _uncommonBits_T_59[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_60 = _uncommonBits_T_60[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_61 = _uncommonBits_T_61[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_62 = _uncommonBits_T_62[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_63 = _uncommonBits_T_63[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_64 = _uncommonBits_T_64[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_65 = _uncommonBits_T_65[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_66 = _uncommonBits_T_66[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_67 = _uncommonBits_T_67[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_68 = _uncommonBits_T_68[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_69 = _uncommonBits_T_69[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_70 = _uncommonBits_T_70[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_71 = _uncommonBits_T_71[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_72 = _uncommonBits_T_72[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_73 = _uncommonBits_T_73[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_74 = _uncommonBits_T_74[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_75 = _uncommonBits_T_75[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_76 = _uncommonBits_T_76[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_77 = _uncommonBits_T_77[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_78 = _uncommonBits_T_78[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_79 = _uncommonBits_T_79[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_80 = _uncommonBits_T_80[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_81 = _uncommonBits_T_81[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_82 = _uncommonBits_T_82[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_83 = _uncommonBits_T_83[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_84 = _uncommonBits_T_84[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_85 = _uncommonBits_T_85[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_86 = _uncommonBits_T_86[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_87 = _uncommonBits_T_87[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_88 = _uncommonBits_T_88[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_89 = _uncommonBits_T_89[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_90 = _uncommonBits_T_90[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_91 = _uncommonBits_T_91[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_92 = _uncommonBits_T_92[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_93 = _uncommonBits_T_93[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_94 = _uncommonBits_T_94[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_95 = _uncommonBits_T_95[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_96 = _uncommonBits_T_96[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_97 = _uncommonBits_T_97[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_98 = _uncommonBits_T_98[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_99 = _uncommonBits_T_99[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_100 = _uncommonBits_T_100[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_101 = _uncommonBits_T_101[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_102 = _uncommonBits_T_102[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_103 = _uncommonBits_T_103[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_104 = _uncommonBits_T_104[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_105 = _uncommonBits_T_105[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_106 = _uncommonBits_T_106[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_107 = _uncommonBits_T_107[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_108 = _uncommonBits_T_108[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_109 = _uncommonBits_T_109[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_110 = _uncommonBits_T_110[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_111 = _uncommonBits_T_111[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_112 = _uncommonBits_T_112[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_113 = _uncommonBits_T_113[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_114 = _uncommonBits_T_114[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_115 = _uncommonBits_T_115[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_116 = _uncommonBits_T_116[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_117 = _uncommonBits_T_117[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_118 = _uncommonBits_T_118[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_119 = _uncommonBits_T_119[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_120 = _uncommonBits_T_120[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_121 = _uncommonBits_T_121[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_122 = _uncommonBits_T_122[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_123 = _uncommonBits_T_123[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_124 = _uncommonBits_T_124[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_125 = _uncommonBits_T_125[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_126 = _uncommonBits_T_126[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_127 = _uncommonBits_T_127[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_128 = _uncommonBits_T_128[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_129 = _uncommonBits_T_129[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_130 = _uncommonBits_T_130[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_131 = _uncommonBits_T_131[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_132 = _uncommonBits_T_132[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_133 = _uncommonBits_T_133[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_134 = _uncommonBits_T_134[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_135 = _uncommonBits_T_135[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_136 = _uncommonBits_T_136[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_137 = _uncommonBits_T_137[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_138 = _uncommonBits_T_138[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_139 = _uncommonBits_T_139[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_140 = _uncommonBits_T_140[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_141 = _uncommonBits_T_141[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_142 = _uncommonBits_T_142[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_143 = _uncommonBits_T_143[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_144 = _uncommonBits_T_144[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_145 = _uncommonBits_T_145[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_146 = _uncommonBits_T_146[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_147 = _uncommonBits_T_147[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_148 = _uncommonBits_T_148[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_149 = _uncommonBits_T_149[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_150 = _uncommonBits_T_150[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_151 = _uncommonBits_T_151[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_152 = _uncommonBits_T_152[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_153 = _uncommonBits_T_153[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_154 = _uncommonBits_T_154[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_155 = _uncommonBits_T_155[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_156 = _uncommonBits_T_156[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_157 = _uncommonBits_T_157[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_158 = _uncommonBits_T_158[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_159 = _uncommonBits_T_159[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_160 = _uncommonBits_T_160[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_161 = _uncommonBits_T_161[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_162 = _uncommonBits_T_162[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_163 = _uncommonBits_T_163[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_164 = _uncommonBits_T_164[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_165 = _uncommonBits_T_165[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_166 = _uncommonBits_T_166[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_167 = _uncommonBits_T_167[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_168 = _uncommonBits_T_168[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_169 = _uncommonBits_T_169[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_170 = _uncommonBits_T_170[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_171 = _uncommonBits_T_171[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_172 = _uncommonBits_T_172[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_173 = _uncommonBits_T_173[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_174 = _uncommonBits_T_174[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_175 = _uncommonBits_T_175[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_176 = _uncommonBits_T_176[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_177 = _uncommonBits_T_177[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_178 = _uncommonBits_T_178[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_179 = _uncommonBits_T_179[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_180 = _uncommonBits_T_180[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_181 = _uncommonBits_T_181[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_182 = _uncommonBits_T_182[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_183 = _uncommonBits_T_183[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_184 = _uncommonBits_T_184[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_185 = _uncommonBits_T_185[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_186 = _uncommonBits_T_186[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_187 = _uncommonBits_T_187[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_188 = _uncommonBits_T_188[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_189 = _uncommonBits_T_189[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_190 = _uncommonBits_T_190[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_191 = _uncommonBits_T_191[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_192 = _uncommonBits_T_192[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_193 = _uncommonBits_T_193[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_194 = _uncommonBits_T_194[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_195 = _uncommonBits_T_195[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_196 = _uncommonBits_T_196[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_197 = _uncommonBits_T_197[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_198 = _uncommonBits_T_198[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_199 = _uncommonBits_T_199[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_200 = _uncommonBits_T_200[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_201 = _uncommonBits_T_201[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_202 = _uncommonBits_T_202[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_203 = _uncommonBits_T_203[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_204 = _uncommonBits_T_204[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_205 = _uncommonBits_T_205[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_206 = _uncommonBits_T_206[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_207 = _uncommonBits_T_207[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_208 = _uncommonBits_T_208[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_209 = _uncommonBits_T_209[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_210 = _uncommonBits_T_210[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_211 = _uncommonBits_T_211[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_212 = _uncommonBits_T_212[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_213 = _uncommonBits_T_213[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_214 = _uncommonBits_T_214[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_215 = _uncommonBits_T_215[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_216 = _uncommonBits_T_216[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_217 = _uncommonBits_T_217[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_218 = _uncommonBits_T_218[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_219 = _uncommonBits_T_219[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_220 = _uncommonBits_T_220[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_221 = _uncommonBits_T_221[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_222 = _uncommonBits_T_222[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_223 = _uncommonBits_T_223[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_224 = _uncommonBits_T_224[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_225 = _uncommonBits_T_225[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_226 = _uncommonBits_T_226[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_227 = _uncommonBits_T_227[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_228 = _uncommonBits_T_228[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_229 = _uncommonBits_T_229[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_230 = _uncommonBits_T_230[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_231 = _uncommonBits_T_231[4:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_232 = _uncommonBits_T_232[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_233 = _uncommonBits_T_233[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_234 = _uncommonBits_T_234[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_235 = _uncommonBits_T_235[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_236 = _uncommonBits_T_236[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_237 = _uncommonBits_T_237[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_238 = _uncommonBits_T_238[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_239 = _uncommonBits_T_239[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_240 = _uncommonBits_T_240[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_241 = _uncommonBits_T_241[4:0]; // @[Parameters.scala:52:{29,56}]
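  // Same source-ID legality check as above, now applied to the D channel
  // (io_in_d_bits_source), producing the _source_ok_WIRE_1_* terms.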
wire _source_ok_T_162 = io_in_d_bits_source_0 == 10'h1D0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_162; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_22 = _source_ok_uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [7:0] _source_ok_T_163 = io_in_d_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_169 = io_in_d_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_175 = io_in_d_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_181 = io_in_d_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_187 = io_in_d_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_193 = io_in_d_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_235 = io_in_d_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_T_241 = io_in_d_bits_source_0[9:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_164 = _source_ok_T_163 == 8'h70; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_166 = _source_ok_T_164; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_168 = _source_ok_T_166; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_168; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_23 = _source_ok_uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_170 = _source_ok_T_169 == 8'h71; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_172 = _source_ok_T_170; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_174 = _source_ok_T_172; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_174; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_24 = _source_ok_uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_176 = _source_ok_T_175 == 8'h72; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_178 = _source_ok_T_176; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_180 = _source_ok_T_178; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_180; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_25 = _source_ok_uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_182 = _source_ok_T_181 == 8'h73; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_184 = _source_ok_T_182; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_186 = _source_ok_T_184; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_186; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_26 = _source_ok_uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_188 = _source_ok_T_187 == 8'h7C; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_190 = _source_ok_T_188; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_192 = _source_ok_T_190; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_5 = _source_ok_T_192; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_27 = _source_ok_uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_194 = _source_ok_T_193 == 8'h7B; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_196 = _source_ok_T_194; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_198 = _source_ok_T_196; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_6 = _source_ok_T_198; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_28 = _source_ok_uncommonBits_T_28[4:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_199 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_205 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_211 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_217 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_223 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_229 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_247 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_253 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_259 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_265 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_271 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_277 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_283 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_289 = io_in_d_bits_source_0[9:5]; // @[Monitor.scala:36:7]
wire _source_ok_T_200 = _source_ok_T_199 == 5'hD; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_202 = _source_ok_T_200; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_204 = _source_ok_T_202; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_7 = _source_ok_T_204; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_29 = _source_ok_uncommonBits_T_29[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_206 = _source_ok_T_205 == 5'hC; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_208 = _source_ok_T_206; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_210 = _source_ok_T_208; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_8 = _source_ok_T_210; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_30 = _source_ok_uncommonBits_T_30[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_212 = _source_ok_T_211 == 5'hB; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_214 = _source_ok_T_212; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_216 = _source_ok_T_214; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_9 = _source_ok_T_216; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_31 = _source_ok_uncommonBits_T_31[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_218 = _source_ok_T_217 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_220 = _source_ok_T_218; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_222 = _source_ok_T_220; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_10 = _source_ok_T_222; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_32 = _source_ok_uncommonBits_T_32[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_224 = _source_ok_T_223 == 5'h9; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_226 = _source_ok_T_224; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_228 = _source_ok_T_226; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_11 = _source_ok_T_228; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_33 = _source_ok_uncommonBits_T_33[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_230 = _source_ok_T_229 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_232 = _source_ok_T_230; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_234 = _source_ok_T_232; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_12 = _source_ok_T_234; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_34 = _source_ok_uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_236 = _source_ok_T_235 == 8'h7A; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_238 = _source_ok_T_236; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_240 = _source_ok_T_238; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_13 = _source_ok_T_240; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_35 = _source_ok_uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_242 = _source_ok_T_241 == 8'h79; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_244 = _source_ok_T_242; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_246 = _source_ok_T_244; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_14 = _source_ok_T_246; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_36 = _source_ok_uncommonBits_T_36[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_248 = _source_ok_T_247 == 5'h7; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_250 = _source_ok_T_248; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_252 = _source_ok_T_250; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_15 = _source_ok_T_252; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_37 = _source_ok_uncommonBits_T_37[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_254 = _source_ok_T_253 == 5'h6; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_256 = _source_ok_T_254; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_258 = _source_ok_T_256; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_16 = _source_ok_T_258; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_38 = _source_ok_uncommonBits_T_38[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_260 = _source_ok_T_259 == 5'h5; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_262 = _source_ok_T_260; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_264 = _source_ok_T_262; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_17 = _source_ok_T_264; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_39 = _source_ok_uncommonBits_T_39[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_266 = _source_ok_T_265 == 5'h4; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_268 = _source_ok_T_266; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_270 = _source_ok_T_268; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_18 = _source_ok_T_270; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_40 = _source_ok_uncommonBits_T_40[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_272 = _source_ok_T_271 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_274 = _source_ok_T_272; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_276 = _source_ok_T_274; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_19 = _source_ok_T_276; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_41 = _source_ok_uncommonBits_T_41[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_278 = _source_ok_T_277 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_280 = _source_ok_T_278; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_282 = _source_ok_T_280; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_20 = _source_ok_T_282; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_42 = _source_ok_uncommonBits_T_42[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_284 = _source_ok_T_283 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_286 = _source_ok_T_284; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_288 = _source_ok_T_286; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_21 = _source_ok_T_288; // @[Parameters.scala:1138:31]
wire [4:0] source_ok_uncommonBits_43 = _source_ok_uncommonBits_T_43[4:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_290 = _source_ok_T_289 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_292 = _source_ok_T_290; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_294 = _source_ok_T_292; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_22 = _source_ok_T_294; // @[Parameters.scala:1138:31]
wire _source_ok_T_295 = io_in_d_bits_source_0 == 10'h1E0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_23 = _source_ok_T_295; // @[Parameters.scala:1138:31]
wire _source_ok_T_296 = io_in_d_bits_source_0 == 10'h1E1; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_24 = _source_ok_T_296; // @[Parameters.scala:1138:31]
wire _source_ok_T_297 = io_in_d_bits_source_0 == 10'h1E2; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_25 = _source_ok_T_297; // @[Parameters.scala:1138:31]
wire _source_ok_T_298 = io_in_d_bits_source_0 == 10'h200; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_26 = _source_ok_T_298; // @[Parameters.scala:1138:31]
wire _source_ok_T_299 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_300 = _source_ok_T_299 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_301 = _source_ok_T_300 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_302 = _source_ok_T_301 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_303 = _source_ok_T_302 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_304 = _source_ok_T_303 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_305 = _source_ok_T_304 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_306 = _source_ok_T_305 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_307 = _source_ok_T_306 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_308 = _source_ok_T_307 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_309 = _source_ok_T_308 | _source_ok_WIRE_1_11; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_310 = _source_ok_T_309 | _source_ok_WIRE_1_12; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_311 = _source_ok_T_310 | _source_ok_WIRE_1_13; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_312 = _source_ok_T_311 | _source_ok_WIRE_1_14; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_313 = _source_ok_T_312 | _source_ok_WIRE_1_15; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_314 = _source_ok_T_313 | _source_ok_WIRE_1_16; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_315 = _source_ok_T_314 | _source_ok_WIRE_1_17; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_316 = _source_ok_T_315 | _source_ok_WIRE_1_18; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_317 = _source_ok_T_316 | _source_ok_WIRE_1_19; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_318 = _source_ok_T_317 | _source_ok_WIRE_1_20; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_319 = _source_ok_T_318 | _source_ok_WIRE_1_21; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_320 = _source_ok_T_319 | _source_ok_WIRE_1_22; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_321 = _source_ok_T_320 | _source_ok_WIRE_1_23; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_322 = _source_ok_T_321 | _source_ok_WIRE_1_24; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_323 = _source_ok_T_322 | _source_ok_WIRE_1_25; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_323 | _source_ok_WIRE_1_26; // @[Parameters.scala:1138:31, :1139:46]
wire _T_2649 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_2649; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_2649; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [9:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
wire _T_2722 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_2722; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_2722; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_2722; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [9:0] source_1; // @[Monitor.scala:541:22]
reg [512:0] inflight; // @[Monitor.scala:614:27]
reg [2051:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [2051:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [512:0] a_set; // @[Monitor.scala:626:34]
wire [512:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [2051:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [2051:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [12:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [12:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [12:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [12:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [12:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [12:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [12:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [12:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [12:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [2051:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [2051:0] _a_opcode_lookup_T_6 = {2048'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [2051:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[2051:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [2051:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [2051:0] _a_size_lookup_T_6 = {2048'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [2051:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[2051:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [1023:0] _GEN_2 = 1024'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [1023:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [1023:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[512:0] : 513'h0; // @[OneHot.scala:58:35]
wire _T_2575 = _T_2649 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_2575 ? _a_set_T[512:0] : 513'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_2575 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_2575 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [12:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [12:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [12:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [8194:0] _a_opcodes_set_T_1 = {8191'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_2575 ? _a_opcodes_set_T_1[2051:0] : 2052'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [8194:0] _a_sizes_set_T_1 = {8191'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_2575 ? _a_sizes_set_T_1[2051:0] : 2052'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [512:0] d_clr; // @[Monitor.scala:664:34]
wire [512:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [2051:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [2051:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_2621 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [1023:0] _GEN_5 = 1024'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [1023:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [1023:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [1023:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [1023:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_2621 & ~d_release_ack ? _d_clr_wo_ready_T[512:0] : 513'h0; // @[OneHot.scala:58:35]
wire _T_2590 = _T_2722 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_2590 ? _d_clr_T[512:0] : 513'h0; // @[OneHot.scala:58:35]
wire [8206:0] _d_opcodes_clr_T_5 = 8207'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_2590 ? _d_opcodes_clr_T_5[2051:0] : 2052'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [8206:0] _d_sizes_clr_T_5 = 8207'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_2590 ? _d_sizes_clr_T_5[2051:0] : 2052'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [512:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [512:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [512:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [2051:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [2051:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [2051:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [2051:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [2051:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [2051:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [512:0] inflight_1; // @[Monitor.scala:726:35]
wire [512:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [2051:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [2051:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [2051:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [2051:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [2051:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [2051:0] _c_opcode_lookup_T_6 = {2048'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [2051:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[2051:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [2051:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [2051:0] _c_size_lookup_T_6 = {2048'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [2051:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[2051:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [512:0] d_clr_1; // @[Monitor.scala:774:34]
wire [512:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [2051:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [2051:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_2693 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_2693 & d_release_ack_1 ? _d_clr_wo_ready_T_1[512:0] : 513'h0; // @[OneHot.scala:58:35]
wire _T_2675 = _T_2722 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_2675 ? _d_clr_T_1[512:0] : 513'h0; // @[OneHot.scala:58:35]
wire [8206:0] _d_opcodes_clr_T_11 = 8207'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_2675 ? _d_opcodes_clr_T_11[2051:0] : 2052'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [8206:0] _d_sizes_clr_T_11 = 8207'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_2675 ? _d_sizes_clr_T_11[2051:0] : 2052'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 10'h0; // @[Monitor.scala:36:7, :795:113]
wire [512:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [512:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [2051:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [2051:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [2051:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [2051:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File INToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val signedIn = Input(Bool())
val in = Input(Bits(intWidth.W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in);
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
intAsRawFloat.expWidth,
intWidth,
expWidth,
sigWidth,
flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows
))
roundAnyRawFNToRecFN.io.invalidExc := false.B
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := intAsRawFloat
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
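// Example, for intuition (input value chosen only for illustration): for a 4-bit input,
//   countLeadingZeros("b0010".U(4.W))   // evaluates to 2.U
// because PriorityEncoder over the reversed bit vector returns the index of the first set
// bit counted from the MSB side, i.e. the number of leading zeros.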
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
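// Example, for intuition (literal chosen only for illustration):
//   orReduceBy2("b1000_0010".U(8.W))    // yields "b1001".U
// since each output bit is the OR of a neighbouring pair of input bits; orReduceBy4 is the
// same reduction over groups of four bits.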
File rawFloatFromIN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
object rawFloatFromIN
{
def apply(signedIn: Bool, in: Bits): RawFloat =
{
val expWidth = log2Up(in.getWidth) + 1
//*** CHANGE THIS; CAN BE VERY LARGE:
val extIntWidth = 1<<(expWidth - 1)
val sign = signedIn && in(in.getWidth - 1)
val absIn = Mux(sign, -in.asUInt, in.asUInt)
val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0)
val adjustedNormDist = countLeadingZeros(extAbsIn)
val sig =
(extAbsIn<<adjustedNormDist)(
extIntWidth - 1, extIntWidth - in.getWidth)
val out = Wire(new RawFloat(expWidth, in.getWidth))
out.isNaN := false.B
out.isInf := false.B
out.isZero := ! sig(in.getWidth - 1)
out.sign := sign
out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext
out.sig := sig
out
}
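// Worked example for the instance elaborated below (intWidth = 1, unsigned input 1):
// sign = 0, absIn = 1, extAbsIn = "b01", adjustedNormDist = 1, so the normalized
// significand's MSB is 1 (isZero = false) and sExp = ("b10" ## ~1).zext = +4, matching the
// constants that appear in the generated module.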
}
| module INToRecFN_i1_e8_s24_46(); // @[INToRecFN.scala:43:7]
wire [1:0] _intAsRawFloat_absIn_T = 2'h3; // @[rawFloatFromIN.scala:52:31]
wire [2:0] _intAsRawFloat_extAbsIn_T = 3'h1; // @[rawFloatFromIN.scala:53:44]
wire [2:0] _intAsRawFloat_sig_T = 3'h2; // @[rawFloatFromIN.scala:56:22]
wire [2:0] _intAsRawFloat_out_sExp_T_2 = 3'h4; // @[rawFloatFromIN.scala:64:33]
wire [3:0] intAsRawFloat_sExp = 4'h4; // @[rawFloatFromIN.scala:59:23, :64:72]
wire [3:0] _intAsRawFloat_out_sExp_T_3 = 4'h4; // @[rawFloatFromIN.scala:59:23, :64:72]
wire [1:0] intAsRawFloat_extAbsIn = 2'h1; // @[rawFloatFromIN.scala:53:53, :59:23, :65:20]
wire [1:0] intAsRawFloat_sig = 2'h1; // @[rawFloatFromIN.scala:53:53, :59:23, :65:20]
wire [4:0] io_exceptionFlags = 5'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15]
wire [32:0] io_out = 33'h80000000; // @[INToRecFN.scala:43:7, :46:16, :60:15]
wire [2:0] io_roundingMode = 3'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15]
wire io_in = 1'h1; // @[Mux.scala:50:70]
wire io_detectTininess = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_sign_T = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_absIn_T_1 = 1'h1; // @[Mux.scala:50:70]
wire intAsRawFloat_absIn = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_adjustedNormDist_T = 1'h1; // @[Mux.scala:50:70]
wire intAsRawFloat_adjustedNormDist = 1'h1; // @[Mux.scala:50:70]
wire intAsRawFloat_sig_0 = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_out_isZero_T = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_out_sExp_T = 1'h1; // @[Mux.scala:50:70]
wire io_signedIn = 1'h0; // @[INToRecFN.scala:43:7]
wire intAsRawFloat_sign = 1'h0; // @[rawFloatFromIN.scala:51:29]
wire _intAsRawFloat_adjustedNormDist_T_1 = 1'h0; // @[primitives.scala:91:52]
wire intAsRawFloat_isNaN = 1'h0; // @[rawFloatFromIN.scala:59:23]
wire intAsRawFloat_isInf = 1'h0; // @[rawFloatFromIN.scala:59:23]
wire intAsRawFloat_isZero = 1'h0; // @[rawFloatFromIN.scala:59:23]
wire intAsRawFloat_sign_0 = 1'h0; // @[rawFloatFromIN.scala:59:23]
wire _intAsRawFloat_out_isZero_T_1 = 1'h0; // @[rawFloatFromIN.scala:62:23]
wire _intAsRawFloat_out_sExp_T_1 = 1'h0; // @[rawFloatFromIN.scala:64:36]
RoundAnyRawFNToRecFN_ie2_is1_oe8_os24_46 roundAnyRawFNToRecFN (); // @[INToRecFN.scala:60:15]
endmodule |
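The emitted module above has an empty port list: this particular instance was elaborated with every input tied to a constant (signedIn = 0, in = 1, round-to-nearest-even), so the whole datapath folds down to the constant recoded value 33'h80000000 seen in the listing. Below is a minimal sketch of driving the same generator with a live input and emitting Verilog for it standalone; the harness and object names are illustrative only, and it assumes chisel3, the circt ChiselStage, and the hardfloat sources above are on the classpath.
import chisel3._
import circt.stage.ChiselStage
class INToRecFNHarness extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(1.W))
    val out = Output(UInt(33.W)) // recoded float is expWidth + sigWidth + 1 = 33 bits
  })
  val conv = Module(new hardfloat.INToRecFN(intWidth = 1, expWidth = 8, sigWidth = 24))
  conv.io.signedIn       := false.B
  conv.io.in             := io.in
  conv.io.roundingMode   := hardfloat.consts.round_near_even
  conv.io.detectTininess := hardfloat.consts.tininess_afterRounding
  io.out := conv.io.out
}
object EmitINToRecFNHarness extends App {
  ChiselStage.emitSystemVerilog(new INToRecFNHarness)
}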
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
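// Illustrative usage (signal and suggested names here are hypothetical):
//   ShiftRegInit(mySignal, n = 3, init = false.B, Some("sync"))
// builds a 3-deep chain of registers suggested-named sync_2 -> sync_1 -> sync_0, each reset
// to false.B, with the input entering at sync_2 and the output taken from sync_0.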
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
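// For intuition (depth = 3, name = "pipe"): the chain is pipe_2 -> pipe_1 -> pipe_0, io.d
// feeds chain.last (pipe_2), each stage copies its source's q every cycle, and io.q is taken
// from chain.head (pipe_0), giving a 3-cycle, asynchronously reset pipeline.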
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
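// Example, for intuition: the default AsyncQueueParams() gives depth = 8, sync = 3,
// safe = true, narrow = false, so bits = log2Ceil(8) = 3, the Gray pointers are bits+1 = 4
// bits wide, and wires = 8 (one mem lane per entry crosses the boundary); with narrow = true
// only a single lane would cross and the read mux would sit on the source side.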
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
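// Example, for intuition: with bits = 4 the counter visits 0000 -> 0001 -> 0011 -> 0010 ->
// 0110 -> ... because the returned value is incremented ^ (incremented >> 1); consecutive
// outputs differ in exactly one bit, which is what makes the widx/ridx pointers safe to
// synchronize bit-by-bit across the clock crossing.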
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
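// For intuition (default depth = 8, 4-bit Gray pointers): (params.depth | params.depth >> 1).U
// is "b1100".U, and XOR-ing ridx with it flips the top two Gray-code bits; widx equals that
// flipped value exactly when the queue holds depth entries, so `ready` deasserts when full.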
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denotes: don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
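// For intuition: the queue is empty exactly when the synchronized Gray pointers are equal,
// so `valid` is simply pointer inequality gated by the cross-domain safety handshake.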
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that the bits latch when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denotes: don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_164( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_178 io_out_sink_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
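The module above likewise reduces to a thin wrapper: io_in is tied high and the real work happens in the AsyncResetSynchronizerShiftReg_w1_d3_i0 instance, i.e. a 3-deep, asynchronously reset synchronizer chain. Below is a rough behavioural sketch of the same idea; the names are illustrative, and it is not the structural module emitted above, which uses the dedicated shift-register wrapper so CDC-aware backends can recognize and constrain it.
import chisel3._
import chisel3.util._
class ValidSyncSketch(sync: Int = 3) extends Module {
  val out = IO(Output(Bool()))
  // Three async-reset flops in series fed with a constant 1: after reset deasserts,
  // `out` rises within `sync` cycles, mirroring AsyncValidSync's handshake behaviour.
  out := withReset(reset.asAsyncReset) {
    ShiftRegister(true.B, sync, false.B, true.B)
  }
}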
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
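// Editor's note (illustrative, not part of the upstream file): with the default
// AsyncQueueParams(depth = 8, sync = 3, safe = true, narrow = false), bits = log2Ceil(8) = 3
// and wires = 8, so the AsyncBundle below carries the full 8-entry mem plus 4-bit (bits+1)
// gray-coded widx/ridx pointers. Setting narrow = true collapses wires to 1 and adds a
// bits-wide index field instead, moving the read mux to the source side of the crossing.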
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
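// Editor's note (illustrative, not part of the upstream file): the final line converts the
// binary count to Gray code, so consecutive values differ in exactly one bit and the pointer
// can be synchronized safely across clock domains. For 3 bits:
//   binary: 000 001 010 011 100 101 110 111
//   gray:   000 001 011 010 110 111 101 100
// e.g. 3 -> 011 ^ (011 >> 1) = 011 ^ 001 = 010.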
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write, because a dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that the
// dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
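// Editor's sketch (not part of the upstream file): ToAsyncBundle/FromAsyncBundle are the usual
// entry points for carrying a Decoupled stream across clock domains. The wrapper module, port
// names, and payload width below are illustrative assumptions.
class AsyncBundleExample extends RawModule {
  val enq_clock = IO(Input(Clock()))
  val enq_reset = IO(Input(Bool()))
  val deq_clock = IO(Input(Clock()))
  val deq_reset = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(UInt(8.W))))
  val deq = IO(Decoupled(UInt(8.W)))
  // the source half is elaborated in the enqueue clock domain, the sink half in the dequeue domain
  val crossing = withClockAndReset(enq_clock, enq_reset) { ToAsyncBundle(enq, AsyncQueueParams()) }
  deq <> withClockAndReset(deq_clock, deq_reset) { FromAsyncBundle(crossing) }
}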
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_121( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_131 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File SinkX.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
class SinkXRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val address = UInt(params.inner.bundle.addressBits.W)
}
class SinkX(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Decoupled(new FullRequest(params))
val x = Flipped(Decoupled(new SinkXRequest(params)))
})
val x = Queue(io.x, 1)
val (tag, set, offset) = params.parseAddress(x.bits.address)
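// Editor's note (illustrative): for the particular cache parameterization compiled into the
// Verilog below, parseAddress yields an 11-bit tag ({address[31], address[27:18]}) and a
// 10-bit set (address[17:8]); the parsed offset is unused here, since io.req.bits.offset is
// tied to zero further down.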
x.ready := io.req.ready
io.req.valid := x.valid
params.ccover(x.valid && !x.ready, "SINKX_STALL", "Backpressure when accepting a control message")
io.req.bits.prio := VecInit(1.U(3.W).asBools) // same prio as A
io.req.bits.control:= true.B
io.req.bits.opcode := 0.U
io.req.bits.param := 0.U
io.req.bits.size := params.offsetBits.U
// The source does not matter, because a flush command never allocates a way.
// However, it must be a legal source, otherwise assertions might spuriously fire.
io.req.bits.source := params.inner.client.clients.map(_.sourceId.start).min.U
io.req.bits.offset := 0.U
io.req.bits.set := set
io.req.bits.tag := tag
io.req.bits.put := 0.U
}
| module SinkX( // @[SinkX.scala:28:7]
input clock, // @[SinkX.scala:28:7]
input reset, // @[SinkX.scala:28:7]
input io_req_ready, // @[SinkX.scala:30:14]
output io_req_valid, // @[SinkX.scala:30:14]
output [10:0] io_req_bits_tag, // @[SinkX.scala:30:14]
output [9:0] io_req_bits_set, // @[SinkX.scala:30:14]
output io_x_ready, // @[SinkX.scala:30:14]
input io_x_valid, // @[SinkX.scala:30:14]
input [31:0] io_x_bits_address // @[SinkX.scala:30:14]
);
wire [31:0] _x_q_io_deq_bits_address; // @[Decoupled.scala:362:21]
Queue1_SinkXRequest x_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (io_x_ready),
.io_enq_valid (io_x_valid),
.io_enq_bits_address (io_x_bits_address),
.io_deq_ready (io_req_ready),
.io_deq_valid (io_req_valid),
.io_deq_bits_address (_x_q_io_deq_bits_address)
); // @[Decoupled.scala:362:21]
assign io_req_bits_tag = {_x_q_io_deq_bits_address[31], _x_q_io_deq_bits_address[27:18]}; // @[Decoupled.scala:362:21]
assign io_req_bits_set = _x_q_io_deq_bits_address[17:8]; // @[Decoupled.scala:362:21]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
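// Editor's note (illustrative sketch, not part of the upstream file): TLBuffer is normally
// spliced into a diplomatic graph between two existing nodes, for example
//
//   managerNode := TLBuffer(BufferParams.default) := clientNode
//   managerNode := TLBuffer.chainNode(2, Some("outbound")) := clientNode
//
// where managerNode and clientNode are assumed to be defined elsewhere inside a LazyModule
// with an implicit Parameters in scope.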
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
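// Editor's note (illustrative sketch, not part of the upstream file): these two nodes are the
// halves of an asynchronous crossing. Conceptually the graph is wired as
//
//   sinkSideNode := TLAsyncCrossingSink(AsyncQueueParams()) := TLAsyncCrossingSource() := sourceSideNode
//
// where TLAsyncCrossingSource/TLAsyncCrossingSink are the LazyModule wrappers (defined elsewhere)
// around these node types, and the endpoint node names are assumptions.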
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: unconnected forwarded diplomatic signals are tied to DontCare for compatibility;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: unconnected forwarded diplomatic signals are tied to DontCare for compatibility;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
   * Accessors to the results of negotiation; they should only be used within [[LazyModuleImp]] code or after
   * its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
   * Accessors to the results of negotiation; they should only be used within [[LazyModuleImp]] code or after
   * its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a29d64s8k1z4u( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [28:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [28:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [28:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [7:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [28:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [7:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [28:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [28:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
TLMonitor_20 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a29d64s8k1z4u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a29d64s8k1z4u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_param (nodeOut_d_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_sink (nodeOut_d_bits_sink), // @[MixedNode.scala:542:17]
.io_enq_bits_denied (nodeOut_d_bits_denied), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_enq_bits_corrupt (nodeOut_d_bits_corrupt), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
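The MixedNode machinery shown above (parameter negotiation, edgesIn/edgesOut, bundleIn/bundleOut, and the in/out accessors) is normally consumed from LazyModuleImp code, which is where the generated TLBuffer above wires its queues. Below is a minimal sketch of that usage pattern; it is not part of the sources above, and the class name TLPassthrough and its pass-through body are illustrative assumptions only.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
class TLPassthrough(implicit p: Parameters) extends LazyModule {
  // Identity adapter node: client and manager parameters pass through unchanged.
  val node = TLAdapterNode()
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    // node.in / node.out are only legal once instantiation of the parent
    // LazyModule.module has begun (enforced by the require() calls above).
    (node.in zip node.out).foreach { case ((in, _), (out, _)) =>
      out.a <> in.a   // forward requests outward
      in.d  <> out.d  // forward responses back inward
      // This sketch simply terminates B/C/E; a full adapter would forward
      // those channels as well when the link supports Acquire/Probe.
      in.b.valid  := false.B
      in.c.ready  := true.B
      in.e.ready  := true.B
      out.b.ready := true.B
      out.c.valid := false.B
      out.e.valid := false.B
    }
  }
}
In a larger design this would be instantiated with LazyModule(new TLPassthrough) and spliced into a TileLink path with :=, much as the TLBuffer companion object does with its buffer node.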
Generate the Verilog code corresponding to the following Chisel files.
File bim.scala:
package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, WrapInc}
import scala.math.min
class BIMMeta(implicit p: Parameters) extends BoomBundle()(p)
with HasBoomFrontendParameters
{
val bims = Vec(bankWidth, UInt(2.W))
}
case class BoomBIMParams(
nSets: Int = 2048
)
class BIMBranchPredictorBank(params: BoomBIMParams = BoomBIMParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
override val nSets = params.nSets
require(isPow2(nSets))
val nWrBypassEntries = 2
def bimWrite(v: UInt, taken: Bool): UInt = {
val old_bim_sat_taken = v === 3.U
val old_bim_sat_ntaken = v === 0.U
Mux(old_bim_sat_taken && taken, 3.U,
Mux(old_bim_sat_ntaken && !taken, 0.U,
Mux(taken, v + 1.U, v - 1.U)))
}
val s2_meta = Wire(new BIMMeta)
override val metaSz = s2_meta.asUInt.getWidth
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nSets).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nSets-1).U) { doing_reset := false.B }
val data = SyncReadMem(nSets, Vec(bankWidth, UInt(2.W)))
val mems = Seq(("bim", nSets, bankWidth * 2))
val s2_req_rdata = RegNext(data.read(s0_idx , s0_valid))
val s2_resp = Wire(Vec(bankWidth, Bool()))
for (w <- 0 until bankWidth) {
s2_resp(w) := s2_valid && s2_req_rdata(w)(1) && !doing_reset
s2_meta.bims(w) := s2_req_rdata(w)
}
val s1_update_wdata = Wire(Vec(bankWidth, UInt(2.W)))
val s1_update_wmask = Wire(Vec(bankWidth, Bool()))
val s1_update_meta = s1_update.bits.meta.asTypeOf(new BIMMeta)
val s1_update_index = s1_update_idx
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nSets).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(2.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_idxs(i) === s1_update_index(log2Ceil(nSets)-1,0)
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
s1_update_wmask(w) := false.B
s1_update_wdata(w) := DontCare
val update_pc = s1_update.bits.pc + (w << 1).U
when (s1_update.bits.br_mask(w) ||
(s1_update.bits.cfi_idx.valid && s1_update.bits.cfi_idx.bits === w.U)) {
val was_taken = (
s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
(
(s1_update.bits.cfi_is_br && s1_update.bits.br_mask(w) && s1_update.bits.cfi_taken) ||
s1_update.bits.cfi_is_jal
)
)
val old_bim_value = Mux(wrbypass_hit, wrbypass(wrbypass_hit_idx)(w), s1_update_meta.bims(w))
s1_update_wmask(w) := true.B
s1_update_wdata(w) := bimWrite(old_bim_value, was_taken)
}
}
when (doing_reset || (s1_update.valid && s1_update.bits.is_commit_update)) {
data.write(
Mux(doing_reset, reset_idx, s1_update_index),
Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 2.U }), s1_update_wdata),
Mux(doing_reset, (~(0.U(bankWidth.W))), s1_update_wmask.asUInt).asBools
)
}
when (s1_update_wmask.reduce(_||_) && s1_update.valid && s1_update.bits.is_commit_update) {
when (wrbypass_hit) {
wrbypass(wrbypass_hit_idx) := s1_update_wdata
} .otherwise {
wrbypass(wrbypass_enq_idx) := s1_update_wdata
wrbypass_idxs(wrbypass_enq_idx) := s1_update_index
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
for (w <- 0 until bankWidth) {
io.resp.f2(w).taken := s2_resp(w)
io.resp.f3(w).taken := RegNext(io.resp.f2(w).taken)
}
io.f3_meta := RegNext(s2_meta.asUInt)
}
| module data_2( // @[bim.scala:50:26]
input [10:0] R0_addr,
input R0_en,
input R0_clk,
output [7:0] R0_data,
input [10:0] W0_addr,
input W0_en,
input W0_clk,
input [7:0] W0_data,
input [3:0] W0_mask
);
data_ext data_ext ( // @[bim.scala:50:26]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
); // @[bim.scala:50:26]
endmodule |
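The heart of bim.scala above is bimWrite, a 2-bit saturating counter: values 0 and 1 predict not-taken, 2 and 3 predict taken (s2_resp keys off bit 1 of each counter), and the table is reset to 2. The following is a plain-Scala model of the same update rule, for illustration only; it is not part of the sources above.
// Software model of bimWrite; not Chisel, just the update rule.
def bimWriteModel(v: Int, taken: Boolean): Int = {
  require(v >= 0 && v <= 3)
  if (v == 3 && taken) 3        // saturated taken stays saturated
  else if (v == 0 && !taken) 0  // saturated not-taken stays saturated
  else if (taken) v + 1
  else v - 1
}
// Example: starting from the reset value 2, two not-taken outcomes flip the
// prediction: bimWriteModel(2, false) == 1, then bimWriteModel(1, false) == 0.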
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
        // Get the empty [[Dangle]]s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
        // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
        // @todo <> in chisel3 makes a directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module TLBuffer_a32d64s6k3z3c( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_e_bits_sink // @[LazyModuleImp.scala:107:25]
);
wire _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [1:0] _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [5:0] _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
wire _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21]
TLMonitor_37 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (_nodeOut_a_q_io_enq_ready), // @[Decoupled.scala:362:21]
.io_in_a_valid (auto_in_a_valid),
.io_in_a_bits_opcode (auto_in_a_bits_opcode),
.io_in_a_bits_param (auto_in_a_bits_param),
.io_in_a_bits_size (auto_in_a_bits_size),
.io_in_a_bits_source (auto_in_a_bits_source),
.io_in_a_bits_address (auto_in_a_bits_address),
.io_in_a_bits_mask (auto_in_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_a_bits_corrupt),
.io_in_b_ready (auto_in_b_ready),
.io_in_b_valid (auto_out_b_valid),
.io_in_b_bits_param (auto_out_b_bits_param),
.io_in_b_bits_address (auto_out_b_bits_address),
.io_in_c_ready (auto_out_c_ready),
.io_in_c_valid (auto_in_c_valid),
.io_in_c_bits_opcode (auto_in_c_bits_opcode),
.io_in_c_bits_param (auto_in_c_bits_param),
.io_in_c_bits_size (auto_in_c_bits_size),
.io_in_c_bits_source (auto_in_c_bits_source),
.io_in_c_bits_address (auto_in_c_bits_address),
.io_in_c_bits_corrupt (auto_in_c_bits_corrupt),
.io_in_d_ready (auto_in_d_ready),
.io_in_d_valid (_nodeIn_d_q_io_deq_valid), // @[Decoupled.scala:362:21]
.io_in_d_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), // @[Decoupled.scala:362:21]
.io_in_d_bits_param (_nodeIn_d_q_io_deq_bits_param), // @[Decoupled.scala:362:21]
.io_in_d_bits_size (_nodeIn_d_q_io_deq_bits_size), // @[Decoupled.scala:362:21]
.io_in_d_bits_source (_nodeIn_d_q_io_deq_bits_source), // @[Decoupled.scala:362:21]
.io_in_d_bits_sink (_nodeIn_d_q_io_deq_bits_sink), // @[Decoupled.scala:362:21]
.io_in_d_bits_denied (_nodeIn_d_q_io_deq_bits_denied), // @[Decoupled.scala:362:21]
.io_in_d_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt), // @[Decoupled.scala:362:21]
.io_in_e_valid (auto_in_e_valid),
.io_in_e_bits_sink (auto_in_e_bits_sink)
); // @[Nodes.scala:27:25]
Queue1_TLBundleA_a32d64s6k3z3c nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (_nodeOut_a_q_io_enq_ready),
.io_enq_valid (auto_in_a_valid),
.io_enq_bits_opcode (auto_in_a_bits_opcode),
.io_enq_bits_param (auto_in_a_bits_param),
.io_enq_bits_size (auto_in_a_bits_size),
.io_enq_bits_source (auto_in_a_bits_source),
.io_enq_bits_address (auto_in_a_bits_address),
.io_enq_bits_mask (auto_in_a_bits_mask),
.io_enq_bits_data (auto_in_a_bits_data),
.io_enq_bits_corrupt (auto_in_a_bits_corrupt),
.io_deq_ready (auto_out_a_ready),
.io_deq_valid (auto_out_a_valid),
.io_deq_bits_opcode (auto_out_a_bits_opcode),
.io_deq_bits_param (auto_out_a_bits_param),
.io_deq_bits_size (auto_out_a_bits_size),
.io_deq_bits_source (auto_out_a_bits_source),
.io_deq_bits_address (auto_out_a_bits_address),
.io_deq_bits_mask (auto_out_a_bits_mask),
.io_deq_bits_data (auto_out_a_bits_data),
.io_deq_bits_corrupt (auto_out_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleD_a32d64s6k3z3c nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (auto_out_d_ready),
.io_enq_valid (auto_out_d_valid),
.io_enq_bits_opcode (auto_out_d_bits_opcode),
.io_enq_bits_param (auto_out_d_bits_param),
.io_enq_bits_size (auto_out_d_bits_size),
.io_enq_bits_source (auto_out_d_bits_source),
.io_enq_bits_sink (auto_out_d_bits_sink),
.io_enq_bits_denied (auto_out_d_bits_denied),
.io_enq_bits_data (auto_out_d_bits_data),
.io_enq_bits_corrupt (auto_out_d_bits_corrupt),
.io_deq_ready (auto_in_d_ready),
.io_deq_valid (_nodeIn_d_q_io_deq_valid),
.io_deq_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode),
.io_deq_bits_param (_nodeIn_d_q_io_deq_bits_param),
.io_deq_bits_size (_nodeIn_d_q_io_deq_bits_size),
.io_deq_bits_source (_nodeIn_d_q_io_deq_bits_source),
.io_deq_bits_sink (_nodeIn_d_q_io_deq_bits_sink),
.io_deq_bits_denied (_nodeIn_d_q_io_deq_bits_denied),
.io_deq_bits_data (auto_in_d_bits_data),
.io_deq_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21]
assign auto_in_b_valid = auto_out_b_valid; // @[Buffer.scala:40:9]
assign auto_in_b_bits_param = auto_out_b_bits_param; // @[Buffer.scala:40:9]
assign auto_in_b_bits_address = auto_out_b_bits_address; // @[Buffer.scala:40:9]
assign auto_in_c_ready = auto_out_c_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid = _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_opcode = _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_param = _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_size = _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_source = _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_sink = _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_denied = _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_corrupt = _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
assign auto_out_b_ready = auto_in_b_ready; // @[Buffer.scala:40:9]
assign auto_out_c_valid = auto_in_c_valid; // @[Buffer.scala:40:9]
assign auto_out_c_bits_opcode = auto_in_c_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_c_bits_param = auto_in_c_bits_param; // @[Buffer.scala:40:9]
assign auto_out_c_bits_size = auto_in_c_bits_size; // @[Buffer.scala:40:9]
assign auto_out_c_bits_source = auto_in_c_bits_source; // @[Buffer.scala:40:9]
assign auto_out_c_bits_address = auto_in_c_bits_address; // @[Buffer.scala:40:9]
assign auto_out_c_bits_data = auto_in_c_bits_data; // @[Buffer.scala:40:9]
assign auto_out_c_bits_corrupt = auto_in_c_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_e_valid = auto_in_e_valid; // @[Buffer.scala:40:9]
assign auto_out_e_bits_sink = auto_in_e_bits_sink; // @[Buffer.scala:40:9]
endmodule |
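Buffer.scala above also provides factory helpers (TLBuffer.apply and TLBuffer.chainNode) for splicing buffers into a TileLink path without naming the LazyModule explicitly. The sketch below shows typical usage; the class BufferedPath and the client/manager endpoints handed to it are illustrative assumptions, not part of the sources above.
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.diplomacy.BufferParams
import freechips.rocketchip.tilelink._
class BufferedPath(client: TLClientNode, manager: TLManagerNode)(implicit p: Parameters)
    extends LazyModule {
  // One queue entry on every channel (BufferParams.default for a..e):
  manager := TLBuffer() := client
  // Other common variants:
  //   TLBuffer(BufferParams.default, BufferParams.none)   // buffer A/C/E only
  //   TLBuffer.chainNode(2, Some("mbus_buffer"))           // two buffers in series
  lazy val module = new LazyModuleImp(this)
}
TLBuffer.chainNode reduces its buffers with :*=* so the whole chain behaves as a single node, and falls back to TLNameNode("no_buffer") when depth is zero.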
Generate the Verilog code corresponding to the following Chisel files.
File RouteComputer.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.experimental.decode.{TruthTable, decoder}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import freechips.rocketchip.rocket.DecodeLogic
import constellation.channel._
import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo}
import constellation.noc.{HasNoCParams}
class RouteComputerReq(implicit val p: Parameters) extends Bundle with HasNoCParams {
val src_virt_id = UInt(virtualChannelBits.W)
val flow = new FlowRoutingBundle
}
class RouteComputerResp(
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle
with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
class RouteComputer(
val routerParams: RouterParams,
val inParams: Seq[ChannelParams],
val outParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams
with HasNoCParams {
val io = IO(new Bundle {
val req = MixedVec(allInParams.map { u => Flipped(Decoupled(new RouteComputerReq)) })
val resp = MixedVec(allInParams.map { u => Output(new RouteComputerResp(outParams, egressParams)) })
})
(io.req zip io.resp).zipWithIndex.map { case ((req, resp), i) =>
req.ready := true.B
if (outParams.size == 0) {
assert(!req.valid)
resp.vc_sel := DontCare
} else {
def toUInt(t: (Int, FlowRoutingInfo)): UInt = {
val l2 = (BigInt(t._1) << req.bits.flow.vnet_id .getWidth) | t._2.vNetId
val l3 = ( l2 << req.bits.flow.ingress_node .getWidth) | t._2.ingressNode
val l4 = ( l3 << req.bits.flow.ingress_node_id.getWidth) | t._2.ingressNodeId
val l5 = ( l4 << req.bits.flow.egress_node .getWidth) | t._2.egressNode
val l6 = ( l5 << req.bits.flow.egress_node_id .getWidth) | t._2.egressNodeId
l6.U(req.bits.getWidth.W)
}
val flow = req.bits.flow
val table = allInParams(i).possibleFlows.toSeq.distinct.map { pI =>
allInParams(i).channelRoutingInfos.map { cI =>
var row: String = "b"
(0 until nOutputs).foreach { o =>
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
row = row + (if (routingRelation(cI, outParams(o).channelRoutingInfos(outVId), pI)) "1" else "0")
}
}
((cI.vc, pI), row)
}
}.flatten
val addr = req.bits.asUInt
val width = outParams.map(_.nVirtualChannels).reduce(_+_)
val decoded = if (table.size > 0) {
val truthTable = TruthTable(
table.map { e => (BitPat(toUInt(e._1)), BitPat(e._2)) },
BitPat("b" + "?" * width)
)
Reverse(decoder(addr, truthTable))
} else {
0.U(width.W)
}
var idx = 0
(0 until nAllOutputs).foreach { o =>
if (o < nOutputs) {
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
resp.vc_sel(o)(outVId) := decoded(idx)
idx += 1
}
} else {
resp.vc_sel(o)(0) := false.B
}
}
}
}
}
| module RouteComputer_12( // @[RouteComputer.scala:29:7]
input [3:0] io_req_2_bits_flow_egress_node, // @[RouteComputer.scala:40:14]
input [1:0] io_req_2_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14]
input [1:0] io_req_1_bits_src_virt_id, // @[RouteComputer.scala:40:14]
input [1:0] io_req_1_bits_flow_vnet_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_1_bits_flow_ingress_node, // @[RouteComputer.scala:40:14]
input [2:0] io_req_1_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_1_bits_flow_egress_node, // @[RouteComputer.scala:40:14]
input [1:0] io_req_1_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14]
output io_resp_2_vc_sel_1_0, // @[RouteComputer.scala:40:14]
output io_resp_2_vc_sel_1_1, // @[RouteComputer.scala:40:14]
output io_resp_2_vc_sel_1_2, // @[RouteComputer.scala:40:14]
output io_resp_2_vc_sel_0_0, // @[RouteComputer.scala:40:14]
output io_resp_2_vc_sel_0_1, // @[RouteComputer.scala:40:14]
output io_resp_2_vc_sel_0_2, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_0, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_1, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_1_2, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_0, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_1, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_2 // @[RouteComputer.scala:40:14]
);
wire [16:0] decoded_invInputs_1 = ~{io_req_1_bits_src_virt_id, io_req_1_bits_flow_vnet_id, io_req_1_bits_flow_ingress_node, io_req_1_bits_flow_ingress_node_id, io_req_1_bits_flow_egress_node, io_req_1_bits_flow_egress_node_id}; // @[pla.scala:78:21]
wire [1:0] _GEN = ~(io_req_2_bits_flow_egress_node[3:2]); // @[pla.scala:78:21]
assign io_resp_2_vc_sel_1_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_2_vc_sel_1_1 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_2_vc_sel_1_2 = |{&{io_req_2_bits_flow_egress_node[2], _GEN[1]}, io_req_2_bits_flow_egress_node[3]}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_2_vc_sel_0_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_2_vc_sel_0_1 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_2_vc_sel_0_2 = &{_GEN[0], _GEN[1]}; // @[pla.scala:78:21, :91:29, :98:{53,70}]
assign io_resp_1_vc_sel_1_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_1_1 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_1_2 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_1 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_2 = |{&{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node_id[1], io_req_1_bits_flow_ingress_node[2], io_req_1_bits_flow_ingress_node[3], decoded_invInputs_1[13], io_req_1_bits_flow_vnet_id[1], decoded_invInputs_1[15]}, &{decoded_invInputs_1[0], io_req_1_bits_flow_egress_node_id[1], io_req_1_bits_flow_ingress_node[2], io_req_1_bits_flow_ingress_node[3], decoded_invInputs_1[13], io_req_1_bits_flow_vnet_id[1], decoded_invInputs_1[16]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UART.scala:
package sifive.blocks.devices.uart
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.interrupts._
import freechips.rocketchip.prci._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.util._
import sifive.blocks.util._
/** UART parameters
*
* @param address uart device TL base address
* @param dataBits number of bits in data frame
* @param stopBits number of stop bits
* @param divisorBits width of baud rate divisor
 * @param oversample log2 of the Rx oversampling factor; each data bit is sampled 2^oversample times
 * @param nSamples number of Rx samples used to decide the value of each data bit
* @param nTxEntries number of entries in fifo between TL bus and Tx
* @param nRxEntries number of entries in fifo between TL bus and Rx
* @param includeFourWire additional CTS/RTS ports for flow control
* @param includeParity parity support
* @param includeIndependentParity Tx and Rx have opposite parity modes
* @param initBaudRate initial baud rate
*
 * @note baud rate divisor = clock frequency / baud rate, i.e. the number of clock periods per data bit.
* Calculated in [[UARTAttachParams.attachTo()]]
*
 * @example To configure an 8N1 UART with the features below:
* {{{
* 8 entries of Tx and Rx fifo
* Baud rate = 115200
* Rx samples each data bit 16 times
 * Uses 3 sample results for each data bit
* }}}
 * Set stopBits as below and keep the other parameters unchanged
* {{{
* stopBits = 1
* }}}
*
*/
case class UARTParams(
address: BigInt,
dataBits: Int = 8,
stopBits: Int = 2,
divisorBits: Int = 16,
oversample: Int = 4,
nSamples: Int = 3,
nTxEntries: Int = 8,
nRxEntries: Int = 8,
includeFourWire: Boolean = false,
includeParity: Boolean = false,
includeIndependentParity: Boolean = false, // Tx and Rx have opposite parity modes
initBaudRate: BigInt = BigInt(115200),
) extends DeviceParams
{
def oversampleFactor = 1 << oversample
require(divisorBits > oversample)
require(oversampleFactor > nSamples)
require((dataBits == 8) || (dataBits == 9))
}
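// Illustrative sketch (not part of the original file): the 8N1 configuration from the
// scaladoc example above, plus the divisor computed the same way
// UARTAttachParams.attachTo does. The 100 MHz bus clock and the base address are
// assumptions made only for this example.
object UARTParamsExample {
  val params = UARTParams(address = 0x10013000L, stopBits = 1) // 8 data bits, no parity, 1 stop bit
  val busClockHz = BigInt(100000000)
  // divisor = clock frequency / baud rate = clock periods per data bit
  val divisorInit: Int = (busClockHz / params.initBaudRate).toInt // 868 for 115200 baud
}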
class UARTPortIO(val c: UARTParams) extends Bundle {
val txd = Output(Bool())
val rxd = Input(Bool())
val cts_n = c.includeFourWire.option(Input(Bool()))
val rts_n = c.includeFourWire.option(Output(Bool()))
}
class UARTInterrupts extends Bundle {
val rxwm = Bool()
val txwm = Bool()
}
//abstract class UART(busWidthBytes: Int, val c: UARTParams, divisorInit: Int = 0)
/** The UART module organizes the Tx and Rx datapaths with their FIFOs and generates control signals for them according to the CSRs and UART parameters.
*
* ==Component==
* - Tx
* - Tx fifo
* - Rx
* - Rx fifo
* - TL bus to soc
*
* ==IO==
* [[UARTPortIO]]
*
 * ==Datapath==
* {{{
* TL bus -> Tx fifo -> Tx
* TL bus <- Rx fifo <- Rx
* }}}
*
 * @param divisorInit number of clock periods for one data bit
*/
class UART(busWidthBytes: Int, val c: UARTParams, divisorInit: Int = 0)
(implicit p: Parameters)
extends IORegisterRouter(
RegisterRouterParams(
name = "serial",
compat = Seq("sifive,uart0"),
base = c.address,
beatBytes = busWidthBytes),
new UARTPortIO(c))
//with HasInterruptSources {
with HasInterruptSources with HasTLControlRegMap {
def nInterrupts = 1 + c.includeParity.toInt
ResourceBinding {
Resource(ResourceAnchors.aliases, "uart").bind(ResourceAlias(device.label))
}
require(divisorInit != 0, "UART divisor wasn't initialized during instantiation")
  require(divisorInit >> c.divisorBits == 0, s"UART divisor reg (width ${c.divisorBits}) not wide enough to hold $divisorInit")
lazy val module = new LazyModuleImp(this) {
val txm = Module(new UARTTx(c))
val txq = Module(new Queue(UInt(c.dataBits.W), c.nTxEntries))
val rxm = Module(new UARTRx(c))
val rxq = Module(new Queue(UInt(c.dataBits.W), c.nRxEntries))
val div = RegInit(divisorInit.U(c.divisorBits.W))
private val stopCountBits = log2Up(c.stopBits)
private val txCountBits = log2Floor(c.nTxEntries) + 1
private val rxCountBits = log2Floor(c.nRxEntries) + 1
val txen = RegInit(false.B)
val rxen = RegInit(false.B)
val enwire4 = RegInit(false.B)
val invpol = RegInit(false.B)
val enparity = RegInit(false.B)
val parity = RegInit(false.B) // Odd parity - 1 , Even parity - 0
val errorparity = RegInit(false.B)
val errie = RegInit(false.B)
val txwm = RegInit(0.U(txCountBits.W))
val rxwm = RegInit(0.U(rxCountBits.W))
val nstop = RegInit(0.U(stopCountBits.W))
val data8or9 = RegInit(true.B)
if (c.includeFourWire){
txm.io.en := txen && (!port.cts_n.get || !enwire4)
txm.io.cts_n.get := port.cts_n.get
}
else
txm.io.en := txen
txm.io.in <> txq.io.deq
txm.io.div := div
txm.io.nstop := nstop
port.txd := txm.io.out
if (c.dataBits == 9) {
txm.io.data8or9.get := data8or9
rxm.io.data8or9.get := data8or9
}
rxm.io.en := rxen
rxm.io.in := port.rxd
rxq.io.enq.valid := rxm.io.out.valid
rxq.io.enq.bits := rxm.io.out.bits
rxm.io.div := div
val tx_busy = (txm.io.tx_busy || txq.io.count.orR) && txen
port.rts_n.foreach { r => r := Mux(enwire4, !(rxq.io.count < c.nRxEntries.U), tx_busy ^ invpol) }
if (c.includeParity) {
txm.io.enparity.get := enparity
txm.io.parity.get := parity
rxm.io.parity.get := parity ^ c.includeIndependentParity.B // independent parity on tx and rx
rxm.io.enparity.get := enparity
errorparity := rxm.io.errorparity.get || errorparity
interrupts(1) := errorparity && errie
}
val ie = RegInit(0.U.asTypeOf(new UARTInterrupts()))
val ip = Wire(new UARTInterrupts)
ip.txwm := (txq.io.count < txwm)
ip.rxwm := (rxq.io.count > rxwm)
interrupts(0) := (ip.txwm && ie.txwm) || (ip.rxwm && ie.rxwm)
val mapping = Seq(
UARTCtrlRegs.txfifo -> RegFieldGroup("txdata",Some("Transmit data"),
NonBlockingEnqueue(txq.io.enq)),
UARTCtrlRegs.rxfifo -> RegFieldGroup("rxdata",Some("Receive data"),
NonBlockingDequeue(rxq.io.deq)),
UARTCtrlRegs.txctrl -> RegFieldGroup("txctrl",Some("Serial transmit control"),Seq(
RegField(1, txen,
RegFieldDesc("txen","Transmit enable", reset=Some(0))),
RegField(stopCountBits, nstop,
RegFieldDesc("nstop","Number of stop bits", reset=Some(0))))),
UARTCtrlRegs.rxctrl -> Seq(RegField(1, rxen,
RegFieldDesc("rxen","Receive enable", reset=Some(0)))),
UARTCtrlRegs.txmark -> Seq(RegField(txCountBits, txwm,
RegFieldDesc("txcnt","Transmit watermark level", reset=Some(0)))),
UARTCtrlRegs.rxmark -> Seq(RegField(rxCountBits, rxwm,
RegFieldDesc("rxcnt","Receive watermark level", reset=Some(0)))),
UARTCtrlRegs.ie -> RegFieldGroup("ie",Some("Serial interrupt enable"),Seq(
RegField(1, ie.txwm,
RegFieldDesc("txwm_ie","Transmit watermark interrupt enable", reset=Some(0))),
RegField(1, ie.rxwm,
RegFieldDesc("rxwm_ie","Receive watermark interrupt enable", reset=Some(0))))),
UARTCtrlRegs.ip -> RegFieldGroup("ip",Some("Serial interrupt pending"),Seq(
RegField.r(1, ip.txwm,
RegFieldDesc("txwm_ip","Transmit watermark interrupt pending", volatile=true)),
RegField.r(1, ip.rxwm,
RegFieldDesc("rxwm_ip","Receive watermark interrupt pending", volatile=true)))),
UARTCtrlRegs.div -> Seq(
RegField(c.divisorBits, div,
RegFieldDesc("div","Baud rate divisor",reset=Some(divisorInit))))
)
val optionalparity = if (c.includeParity) Seq(
UARTCtrlRegs.parity -> RegFieldGroup("paritygenandcheck",Some("Odd/Even Parity Generation/Checking"),Seq(
RegField(1, enparity,
RegFieldDesc("enparity","Enable Parity Generation/Checking", reset=Some(0))),
RegField(1, parity,
RegFieldDesc("parity","Odd(1)/Even(0) Parity", reset=Some(0))),
RegField(1, errorparity,
RegFieldDesc("errorparity","Parity Status Sticky Bit", reset=Some(0))),
RegField(1, errie,
RegFieldDesc("errie","Interrupt on error in parity enable", reset=Some(0)))))) else Nil
val optionalwire4 = if (c.includeFourWire) Seq(
UARTCtrlRegs.wire4 -> RegFieldGroup("wire4",Some("Configure Clear-to-send / Request-to-send ports / RS-485"),Seq(
RegField(1, enwire4,
RegFieldDesc("enwire4","Enable CTS/RTS(1) or RS-485(0)", reset=Some(0))),
RegField(1, invpol,
RegFieldDesc("invpol","Invert polarity of RTS in RS-485 mode", reset=Some(0)))
))) else Nil
val optional8or9 = if (c.dataBits == 9) Seq(
UARTCtrlRegs.either8or9 -> RegFieldGroup("ConfigurableDataBits",Some("Configure number of data bits to be transmitted"),Seq(
RegField(1, data8or9,
RegFieldDesc("databits8or9","Data Bits to be 8(1) or 9(0)", reset=Some(1)))))) else Nil
regmap(mapping ++ optionalparity ++ optionalwire4 ++ optional8or9:_*)
}
}
class TLUART(busWidthBytes: Int, params: UARTParams, divinit: Int)(implicit p: Parameters)
extends UART(busWidthBytes, params, divinit) with HasTLControlRegMap
case class UARTLocated(loc: HierarchicalLocation) extends Field[Seq[UARTAttachParams]](Nil)
case class UARTAttachParams(
device: UARTParams,
controlWhere: TLBusWrapperLocation = PBUS,
blockerAddr: Option[BigInt] = None,
controlXType: ClockCrossingType = NoCrossing,
intXType: ClockCrossingType = NoCrossing) extends DeviceAttachParams
{
def attachTo(where: Attachable)(implicit p: Parameters): TLUART = where {
val name = s"uart_${UART.nextId()}"
val tlbus = where.locateTLBusWrapper(controlWhere)
val divinit = (tlbus.dtsFrequency.get / device.initBaudRate).toInt
val uartClockDomainWrapper = LazyModule(new ClockSinkDomain(take = None, name = Some("TLUART")))
val uart = uartClockDomainWrapper { LazyModule(new TLUART(tlbus.beatBytes, device, divinit)) }
uart.suggestName(name)
tlbus.coupleTo(s"device_named_$name") { bus =>
val blockerOpt = blockerAddr.map { a =>
val blocker = LazyModule(new TLClockBlocker(BasicBusBlockerParams(a, tlbus.beatBytes, tlbus.beatBytes)))
tlbus.coupleTo(s"bus_blocker_for_$name") { blocker.controlNode := TLFragmenter(tlbus, Some("UART_Blocker")) := _ }
blocker
}
uartClockDomainWrapper.clockNode := (controlXType match {
case _: SynchronousCrossing =>
tlbus.dtsClk.map(_.bind(uart.device))
tlbus.fixedClockNode
case _: RationalCrossing =>
tlbus.clockNode
case _: AsynchronousCrossing =>
val uartClockGroup = ClockGroup()
uartClockGroup := where.allClockGroupsNode
blockerOpt.map { _.clockNode := uartClockGroup } .getOrElse { uartClockGroup }
})
(uart.controlXing(controlXType)
:= TLFragmenter(tlbus, Some("UART"))
:= blockerOpt.map { _.node := bus } .getOrElse { bus })
}
(intXType match {
case _: SynchronousCrossing => where.ibus.fromSync
case _: RationalCrossing => where.ibus.fromRational
case _: AsynchronousCrossing => where.ibus.fromAsync
}) := uart.intXing(intXType)
uart
}
}
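// Illustrative sketch (not part of the original file): how a design might register a
// UART under the UARTLocated key so that attachTo() runs for that location. The
// config-fragment shape, the InSubsystem location, and the base address are
// assumptions for this example only; the surrounding project defines the actual keys
// and locations used.
//
//   class WithOneUART extends Config((site, here, up) => {
//     case UARTLocated(InSubsystem) => Seq(UARTAttachParams(UARTParams(address = 0x10013000L)))
//   })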
object UART {
val nextId = { var i = -1; () => { i += 1; i} }
def makePort(node: BundleBridgeSource[UARTPortIO], name: String)(implicit p: Parameters): ModuleValue[UARTPortIO] = {
val uartNode = node.makeSink()
InModuleBody { uartNode.makeIO()(ValName(name)) }
}
  def tieoff(port: UARTPortIO): Unit = {
port.rxd := 1.U
if (port.c.includeFourWire) {
port.cts_n.foreach { ct => ct := false.B } // active-low
}
}
  def loopback(port: UARTPortIO): Unit = {
port.rxd := port.txd
if (port.c.includeFourWire) {
port.cts_n.get := port.rts_n.get
}
}
}
/*
Copyright 2016 SiFive, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module TLUARTClockSinkDomain( // @[ClockDomain.scala:14:9]
output auto_uart_0_int_xing_out_sync_0, // @[LazyModuleImp.scala:107:25]
output auto_uart_0_control_xing_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_uart_0_control_xing_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_uart_0_control_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_uart_0_control_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_uart_0_control_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [12:0] auto_uart_0_control_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [28:0] auto_uart_0_control_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_uart_0_control_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_uart_0_control_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_uart_0_control_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_uart_0_control_xing_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_uart_0_control_xing_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_uart_0_control_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_uart_0_control_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [12:0] auto_uart_0_control_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_uart_0_control_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_uart_0_io_out_txd, // @[LazyModuleImp.scala:107:25]
input auto_uart_0_io_out_rxd, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_reset // @[LazyModuleImp.scala:107:25]
);
wire auto_uart_0_control_xing_in_a_valid_0 = auto_uart_0_control_xing_in_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_uart_0_control_xing_in_a_bits_opcode_0 = auto_uart_0_control_xing_in_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] auto_uart_0_control_xing_in_a_bits_param_0 = auto_uart_0_control_xing_in_a_bits_param; // @[ClockDomain.scala:14:9]
wire [1:0] auto_uart_0_control_xing_in_a_bits_size_0 = auto_uart_0_control_xing_in_a_bits_size; // @[ClockDomain.scala:14:9]
wire [12:0] auto_uart_0_control_xing_in_a_bits_source_0 = auto_uart_0_control_xing_in_a_bits_source; // @[ClockDomain.scala:14:9]
wire [28:0] auto_uart_0_control_xing_in_a_bits_address_0 = auto_uart_0_control_xing_in_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] auto_uart_0_control_xing_in_a_bits_mask_0 = auto_uart_0_control_xing_in_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] auto_uart_0_control_xing_in_a_bits_data_0 = auto_uart_0_control_xing_in_a_bits_data; // @[ClockDomain.scala:14:9]
wire auto_uart_0_control_xing_in_a_bits_corrupt_0 = auto_uart_0_control_xing_in_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_uart_0_control_xing_in_d_ready_0 = auto_uart_0_control_xing_in_d_ready; // @[ClockDomain.scala:14:9]
wire auto_uart_0_io_out_rxd_0 = auto_uart_0_io_out_rxd; // @[ClockDomain.scala:14:9]
wire auto_clock_in_clock_0 = auto_clock_in_clock; // @[ClockDomain.scala:14:9]
wire auto_clock_in_reset_0 = auto_clock_in_reset; // @[ClockDomain.scala:14:9]
wire [1:0] auto_uart_0_control_xing_in_d_bits_param = 2'h0; // @[ClockDomain.scala:14:9]
wire auto_uart_0_control_xing_in_d_bits_sink = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_uart_0_control_xing_in_d_bits_denied = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_uart_0_control_xing_in_d_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire clockNodeIn_clock = auto_clock_in_clock_0; // @[ClockDomain.scala:14:9]
wire auto_uart_0_int_xing_out_sync_0_0; // @[ClockDomain.scala:14:9]
wire clockNodeIn_reset = auto_clock_in_reset_0; // @[ClockDomain.scala:14:9]
wire auto_uart_0_control_xing_in_a_ready_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_uart_0_control_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_uart_0_control_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire [12:0] auto_uart_0_control_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_uart_0_control_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_uart_0_control_xing_in_d_valid_0; // @[ClockDomain.scala:14:9]
wire auto_uart_0_io_out_txd_0; // @[ClockDomain.scala:14:9]
wire childClock; // @[LazyModuleImp.scala:155:31]
wire childReset; // @[LazyModuleImp.scala:158:31]
assign childClock = clockNodeIn_clock; // @[MixedNode.scala:551:17]
assign childReset = clockNodeIn_reset; // @[MixedNode.scala:551:17]
TLUART uart_0 ( // @[UART.scala:271:51]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_int_xing_out_sync_0 (auto_uart_0_int_xing_out_sync_0_0),
.auto_control_xing_in_a_ready (auto_uart_0_control_xing_in_a_ready_0),
.auto_control_xing_in_a_valid (auto_uart_0_control_xing_in_a_valid_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_a_bits_opcode (auto_uart_0_control_xing_in_a_bits_opcode_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_a_bits_param (auto_uart_0_control_xing_in_a_bits_param_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_a_bits_size (auto_uart_0_control_xing_in_a_bits_size_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_a_bits_source (auto_uart_0_control_xing_in_a_bits_source_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_a_bits_address (auto_uart_0_control_xing_in_a_bits_address_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_a_bits_mask (auto_uart_0_control_xing_in_a_bits_mask_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_a_bits_data (auto_uart_0_control_xing_in_a_bits_data_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_a_bits_corrupt (auto_uart_0_control_xing_in_a_bits_corrupt_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_d_ready (auto_uart_0_control_xing_in_d_ready_0), // @[ClockDomain.scala:14:9]
.auto_control_xing_in_d_valid (auto_uart_0_control_xing_in_d_valid_0),
.auto_control_xing_in_d_bits_opcode (auto_uart_0_control_xing_in_d_bits_opcode_0),
.auto_control_xing_in_d_bits_size (auto_uart_0_control_xing_in_d_bits_size_0),
.auto_control_xing_in_d_bits_source (auto_uart_0_control_xing_in_d_bits_source_0),
.auto_control_xing_in_d_bits_data (auto_uart_0_control_xing_in_d_bits_data_0),
.auto_io_out_txd (auto_uart_0_io_out_txd_0),
.auto_io_out_rxd (auto_uart_0_io_out_rxd_0) // @[ClockDomain.scala:14:9]
); // @[UART.scala:271:51]
assign auto_uart_0_int_xing_out_sync_0 = auto_uart_0_int_xing_out_sync_0_0; // @[ClockDomain.scala:14:9]
assign auto_uart_0_control_xing_in_a_ready = auto_uart_0_control_xing_in_a_ready_0; // @[ClockDomain.scala:14:9]
assign auto_uart_0_control_xing_in_d_valid = auto_uart_0_control_xing_in_d_valid_0; // @[ClockDomain.scala:14:9]
assign auto_uart_0_control_xing_in_d_bits_opcode = auto_uart_0_control_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_uart_0_control_xing_in_d_bits_size = auto_uart_0_control_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_uart_0_control_xing_in_d_bits_source = auto_uart_0_control_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_uart_0_control_xing_in_d_bits_data = auto_uart_0_control_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_uart_0_io_out_txd = auto_uart_0_io_out_txd_0; // @[ClockDomain.scala:14:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
 * to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
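// Illustrative sketch (not part of the original file): asLiteral packs the fields
// MSB-first, shifting the accumulator left by each field's width and OR-ing that
// field's value in, so the first field ends up most significant. The plain-Scala fold
// below mirrors that arithmetic; the (value, width) pairs are made-up numbers chosen
// only for this example.
object FlowPackingSketch {
  def pack(fields: Seq[(Int, Int)]): BigInt = // (value, width) pairs, most significant first
    fields.foldLeft(BigInt(0)) { case (acc, (v, w)) => (acc << w) | BigInt(v) }
  // pack(Seq((1, 2), (3, 4), (0, 2), (5, 4), (2, 2))) == BigInt("01" + "0011" + "00" + "0101" + "10", 2)
}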
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_120( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
reg in_flight_5; // @[Monitor.scala:16:26]
reg in_flight_6; // @[Monitor.scala:16:26]
reg in_flight_7; // @[Monitor.scala:16:26]
reg in_flight_8; // @[Monitor.scala:16:26]
reg in_flight_9; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 4'h0; // @[Monitor.scala:21:46]
wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 4'h1; // @[Monitor.scala:21:46]
wire _GEN_1 = io_in_flit_0_bits_virt_channel_id == 4'h2; // @[Monitor.scala:21:46]
wire _GEN_2 = io_in_flit_0_bits_virt_channel_id == 4'h3; // @[Monitor.scala:21:46]
wire _GEN_3 = io_in_flit_0_bits_virt_channel_id == 4'h4; // @[Monitor.scala:21:46]
wire _GEN_4 = io_in_flit_0_bits_virt_channel_id == 4'h5; // @[Monitor.scala:21:46]
wire _GEN_5 = io_in_flit_0_bits_virt_channel_id == 4'h6; // @[Monitor.scala:21:46]
wire _GEN_6 = io_in_flit_0_bits_virt_channel_id == 4'h7; // @[Monitor.scala:21:46] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
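// Illustrative usage sketch (not part of the original file): a 3-deep register chain
// built from ShiftRegInit, with every stage reset to false.B. The module name and
// ports are assumptions made only for this example.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Instantiates three registers named sync_2, sync_1, sync_0 (input side first)
  io.q := ShiftRegInit(io.d, n = 3, init = false.B, name = Some("sync"))
}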
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
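// Illustrative sketch (not part of the original file): how the derived fields fall out
// of the parameters above; the three configurations are made up for this example.
object AsyncQueueParamsExamples {
  val deep = AsyncQueueParams() // depth = 8 => bits = 3, wires = 8 (the whole mem crosses the boundary)
  val narrow = AsyncQueueParams(narrow = true) // read mux moved to the source side => wires = 1
  val single = AsyncQueueParams.singleton() // depth = 1 => bits = 0, wires = 1
}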
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
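// Illustrative sketch (not part of the original file): the binary-to-Gray mapping
// produced by `incremented ^ (incremented >> 1)` above, evaluated in plain Scala for a
// 3-bit counter. Successive values differ in exactly one bit, which is what makes the
// write/read indices safe to synchronize across clock domains.
object GrayCodeSketch {
  def toGray(x: Int): Int = x ^ (x >> 1)
  // (0 until 8).map(toGray) == Seq(0, 1, 3, 2, 6, 7, 5, 4)
}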
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_46( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_63 io_out_source_extend ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tilelink.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.soc.{CanAttachToGlobalNoC}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import scala.collection.immutable.{ListMap}
trait TLFieldHelper {
def getBodyFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleB => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleC => Seq( b.data, b.corrupt)
case b: TLBundleD => Seq( b.data, b.corrupt)
case b: TLBundleE => Seq()
}
def getConstFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address )
case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied)
case b: TLBundleE => Seq( b.sink )
}
def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max
def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max
def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits))
}
class TLMasterToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val a = Decoupled(new IngressFlit(flitWidth))
val b = Flipped(Decoupled(new EgressFlit(flitWidth)))
val c = Decoupled(new IngressFlit(flitWidth))
val d = Flipped(Decoupled(new EgressFlit(flitWidth)))
val e = Decoupled(new IngressFlit(flitWidth))
}
})
val a = Module(new TLAToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0, sourceStart))
val b = Module(new TLBFromNoC(edgeIn, wideBundle, sourceSize))
val c = Module(new TLCToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 1, sourceStart))
val d = Module(new TLDFromNoC(edgeIn, wideBundle, sourceSize))
val e = Module(new TLEToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 2))
a.io.protocol <> io.tilelink.a
io.tilelink.b <> b.io.protocol
c.io.protocol <> io.tilelink.c
io.tilelink.d <> d.io.protocol
e.io.protocol <> io.tilelink.e
io.flits.a <> a.io.flit
b.io.flit <> io.flits.b
io.flits.c <> c.io.flit
d.io.flit <> io.flits.d
io.flits.e <> e.io.flit
}
class TLMasterACDToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val a = Decoupled(new IngressFlit(flitWidth))
val c = Decoupled(new IngressFlit(flitWidth))
val d = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
io.tilelink := DontCare
val a = Module(new TLAToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0, sourceStart))
val c = Module(new TLCToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 1, sourceStart))
val d = Module(new TLDFromNoC(edgeIn, wideBundle, sourceSize))
a.io.protocol <> io.tilelink.a
c.io.protocol <> io.tilelink.c
io.tilelink.d <> d.io.protocol
io.flits.a <> a.io.flit
io.flits.c <> c.io.flit
d.io.flit <> io.flits.d
}
class TLMasterBEToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val b = Flipped(Decoupled(new EgressFlit(flitWidth)))
val e = Decoupled(new IngressFlit(flitWidth))
}
})
io.tilelink := DontCare
val b = Module(new TLBFromNoC(edgeIn, wideBundle, sourceSize))
val e = Module(new TLEToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0))
io.tilelink.b <> b.io.protocol
e.io.protocol <> io.tilelink.e
b.io.flit <> io.flits.b
io.flits.e <> e.io.flit
}
class TLSlaveToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val a = Flipped(Decoupled(new EgressFlit(flitWidth)))
val b = Decoupled(new IngressFlit(flitWidth))
val c = Flipped(Decoupled(new EgressFlit(flitWidth)))
val d = Decoupled(new IngressFlit(flitWidth))
val e = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
val a = Module(new TLAFromNoC(edgeOut, wideBundle))
val b = Module(new TLBToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0))
val c = Module(new TLCFromNoC(edgeOut, wideBundle))
val d = Module(new TLDToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 1, sourceStart))
val e = Module(new TLEFromNoC(edgeOut, wideBundle, sourceSize))
io.tilelink.a <> a.io.protocol
b.io.protocol <> io.tilelink.b
io.tilelink.c <> c.io.protocol
d.io.protocol <> io.tilelink.d
io.tilelink.e <> e.io.protocol
a.io.flit <> io.flits.a
io.flits.b <> b.io.flit
c.io.flit <> io.flits.c
io.flits.d <> d.io.flit
e.io.flit <> io.flits.e
}
class TLSlaveACDToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val a = Flipped(Decoupled(new EgressFlit(flitWidth)))
val c = Flipped(Decoupled(new EgressFlit(flitWidth)))
val d = Decoupled(new IngressFlit(flitWidth))
}
})
io.tilelink := DontCare
val a = Module(new TLAFromNoC(edgeOut, wideBundle))
val c = Module(new TLCFromNoC(edgeOut, wideBundle))
val d = Module(new TLDToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0, sourceStart))
io.tilelink.a <> a.io.protocol
io.tilelink.c <> c.io.protocol
d.io.protocol <> io.tilelink.d
a.io.flit <> io.flits.a
c.io.flit <> io.flits.c
io.flits.d <> d.io.flit
}
class TLSlaveBEToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val b = Decoupled(new IngressFlit(flitWidth))
val e = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
io.tilelink := DontCare
val b = Module(new TLBToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0))
val e = Module(new TLEFromNoC(edgeOut, wideBundle, sourceSize))
b.io.protocol <> io.tilelink.b
io.tilelink.e <> e.io.protocol
io.flits.b <> b.io.flit
e.io.flit <> io.flits.e
}
class TileLinkInterconnectInterface(edgesIn: Seq[TLEdge], edgesOut: Seq[TLEdge])(implicit val p: Parameters) extends Bundle {
val in = MixedVec(edgesIn.map { e => Flipped(new TLBundle(e.bundle)) })
val out = MixedVec(edgesOut.map { e => new TLBundle(e.bundle) })
}
trait TileLinkProtocolParams extends ProtocolParams with TLFieldHelper {
def edgesIn: Seq[TLEdge]
def edgesOut: Seq[TLEdge]
def edgeInNodes: Seq[Int]
def edgeOutNodes: Seq[Int]
require(edgesIn.size == edgeInNodes.size && edgesOut.size == edgeOutNodes.size)
def wideBundle = TLBundleParameters.union(edgesIn.map(_.bundle) ++ edgesOut.map(_.bundle))
def genBundle = new TLBundle(wideBundle)
def inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
def outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
val vNetBlocking = (blocker: Int, blockee: Int) => blocker < blockee
def genIO()(implicit p: Parameters): Data = new TileLinkInterconnectInterface(edgesIn, edgesOut)
}
object TLConnect {
def apply[T <: TLBundleBase](l: DecoupledIO[T], r: DecoupledIO[T]) = {
l.valid := r.valid
r.ready := l.ready
l.bits.squeezeAll.waiveAll :<>= r.bits.squeezeAll.waiveAll
}
}
// BEGIN: TileLinkProtocolParams
case class TileLinkABCDEProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]
) extends TileLinkProtocolParams {
// END: TileLinkProtocolParams
val minPayloadWidth = minTLPayloadWidth(new TLBundle(wideBundle))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(3) (u)) ++ edgeOutNodes.map(u => Seq.fill (2) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(2) (u)) ++ edgeOutNodes.map(u => Seq.fill (3) {u})).flatten
val nVirtualNetworks = 5
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val reachable = edgeIn.client.clients.exists { c => edgeOut.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
val probe = edgeIn.client.anySupportProbe && edgeOut.manager.managers.exists(_.regionType >= RegionType.TRACKED)
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (reachable) Some(FlowParams(ii * 3 + 0 , oi * 3 + 0 + edgesIn.size * 2, 4)) else None) ++ // A
(if (probe ) Some(FlowParams(oi * 2 + 0 + edgesIn.size * 3, ii * 2 + 0 , 3)) else None) ++ // B
(if (release ) Some(FlowParams(ii * 3 + 1 , oi * 3 + 1 + edgesIn.size * 2, 2)) else None) ++ // C
(if (reachable) Some(FlowParams(oi * 2 + 1 + edgesIn.size * 3, ii * 2 + 1 , 1)) else None) ++ // D
(if (release ) Some(FlowParams(ii * 3 + 2 , oi * 3 + 2 + edgesIn.size * 2, 0)) else None)) // E
}}.flatten.flatten
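  // Worked example (illustrative, not in the original source): with 2 input edges and 1 output
  // edge, input edge ii owns ingress terminals {A, C, E} = {3*ii, 3*ii+1, 3*ii+2} and egress
  // terminals {B, D} = {2*ii, 2*ii+1}; the output edge's terminals start after them, with
  // ingress {B, D} = {6, 7} and egress {A, C, E} = {4, 5, 6}. So an A request from input 1 to
  // output 0 flows from ingress 3 to egress 4 on virtual network 4.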
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master = Module(new TLMasterToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 3 + edgesIn.size * 2 + egressOffset,
minPayloadWidth
))
nif_master.io.tilelink := DontCare
nif_master.io.tilelink.a.valid := false.B
nif_master.io.tilelink.c.valid := false.B
nif_master.io.tilelink.e.valid := false.B
TLConnect(nif_master.io.tilelink.a, protocol.in(i).a)
TLConnect(protocol.in(i).d, nif_master.io.tilelink.d)
if (protocol.in(i).params.hasBCE) {
TLConnect(protocol.in(i).b, nif_master.io.tilelink.b)
TLConnect(nif_master.io.tilelink.c, protocol.in(i).c)
TLConnect(nif_master.io.tilelink.e, protocol.in(i).e)
}
ingresses(i * 3 + 0).flit <> nif_master.io.flits.a
ingresses(i * 3 + 1).flit <> nif_master.io.flits.c
ingresses(i * 3 + 2).flit <> nif_master.io.flits.e
nif_master.io.flits.b <> egresses(i * 2 + 0).flit
nif_master.io.flits.d <> egresses(i * 2 + 1).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave = Module(new TLSlaveToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 2 + egressOffset,
minPayloadWidth
))
nif_slave.io.tilelink := DontCare
nif_slave.io.tilelink.b.valid := false.B
nif_slave.io.tilelink.d.valid := false.B
TLConnect(protocol.out(i).a, nif_slave.io.tilelink.a)
TLConnect(nif_slave.io.tilelink.d, protocol.out(i).d)
if (protocol.out(i).params.hasBCE) {
TLConnect(nif_slave.io.tilelink.b, protocol.out(i).b)
TLConnect(protocol.out(i).c, nif_slave.io.tilelink.c)
TLConnect(protocol.out(i).e, nif_slave.io.tilelink.e)
}
ingresses(i * 2 + 0 + edgesIn.size * 3).flit <> nif_slave.io.flits.b
ingresses(i * 2 + 1 + edgesIn.size * 3).flit <> nif_slave.io.flits.d
nif_slave.io.flits.a <> egresses(i * 3 + 0 + edgesIn.size * 2).flit
nif_slave.io.flits.c <> egresses(i * 3 + 1 + edgesIn.size * 2).flit
nif_slave.io.flits.e <> egresses(i * 3 + 2 + edgesIn.size * 2).flit
}
} }
}
}
case class TileLinkACDProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]) extends TileLinkProtocolParams {
val minPayloadWidth = minTLPayloadWidth(Seq(genBundle.a, genBundle.c, genBundle.d).map(_.bits))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(2) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (2) {u})).flatten
val nVirtualNetworks = 3
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val reachable = edgeIn.client.clients.exists { c => edgeOut.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (reachable) Some(FlowParams(ii * 2 + 0 , oi * 2 + 0 + edgesIn.size * 1, 2)) else None) ++ // A
(if (release ) Some(FlowParams(ii * 2 + 1 , oi * 2 + 1 + edgesIn.size * 1, 1)) else None) ++ // C
(if (reachable) Some(FlowParams(oi * 1 + 0 + edgesIn.size * 2, ii * 1 + 0 , 0)) else None)) // D
}}.flatten.flatten
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
protocol := DontCare
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master_acd = Module(new TLMasterACDToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 2 + edgesIn.size * 1 + egressOffset,
minPayloadWidth
))
nif_master_acd.io.tilelink := DontCare
nif_master_acd.io.tilelink.a.valid := false.B
nif_master_acd.io.tilelink.c.valid := false.B
nif_master_acd.io.tilelink.e.valid := false.B
TLConnect(nif_master_acd.io.tilelink.a, protocol.in(i).a)
TLConnect(protocol.in(i).d, nif_master_acd.io.tilelink.d)
if (protocol.in(i).params.hasBCE) {
TLConnect(nif_master_acd.io.tilelink.c, protocol.in(i).c)
}
ingresses(i * 2 + 0).flit <> nif_master_acd.io.flits.a
ingresses(i * 2 + 1).flit <> nif_master_acd.io.flits.c
nif_master_acd.io.flits.d <> egresses(i * 1 + 0).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave_acd = Module(new TLSlaveACDToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 1 + egressOffset,
minPayloadWidth
))
nif_slave_acd.io.tilelink := DontCare
nif_slave_acd.io.tilelink.b.valid := false.B
nif_slave_acd.io.tilelink.d.valid := false.B
TLConnect(protocol.out(i).a, nif_slave_acd.io.tilelink.a)
TLConnect(nif_slave_acd.io.tilelink.d, protocol.out(i).d)
if (protocol.out(i).params.hasBCE) {
TLConnect(protocol.out(i).c, nif_slave_acd.io.tilelink.c)
}
ingresses(i * 1 + 0 + edgesIn.size * 2).flit <> nif_slave_acd.io.flits.d
nif_slave_acd.io.flits.a <> egresses(i * 2 + 0 + edgesIn.size * 1).flit
nif_slave_acd.io.flits.c <> egresses(i * 2 + 1 + edgesIn.size * 1).flit
}
}}
}
}
case class TileLinkBEProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]) extends TileLinkProtocolParams {
val minPayloadWidth = minTLPayloadWidth(Seq(genBundle.b, genBundle.e).map(_.bits))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val nVirtualNetworks = 2
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val probe = edgeIn.client.anySupportProbe && edgeOut.manager.managers.exists(_.regionType >= RegionType.TRACKED)
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (probe ) Some(FlowParams(oi * 1 + 0 + edgesIn.size * 1, ii * 1 + 0 , 1)) else None) ++ // B
(if (release ) Some(FlowParams(ii * 1 + 0 , oi * 1 + 0 + edgesIn.size * 1, 0)) else None)) // E
}}.flatten.flatten
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
protocol := DontCare
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master_be = Module(new TLMasterBEToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 1 + edgesIn.size * 1 + egressOffset,
minPayloadWidth
))
nif_master_be.io.tilelink := DontCare
nif_master_be.io.tilelink.a.valid := false.B
nif_master_be.io.tilelink.c.valid := false.B
nif_master_be.io.tilelink.e.valid := false.B
if (protocol.in(i).params.hasBCE) {
TLConnect(protocol.in(i).b, nif_master_be.io.tilelink.b)
TLConnect(nif_master_be.io.tilelink.e, protocol.in(i).e)
}
ingresses(i * 1 + 0).flit <> nif_master_be.io.flits.e
nif_master_be.io.flits.b <> egresses(i * 1 + 0).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave_be = Module(new TLSlaveBEToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 1 + egressOffset,
minPayloadWidth
))
nif_slave_be.io.tilelink := DontCare
nif_slave_be.io.tilelink.b.valid := false.B
nif_slave_be.io.tilelink.d.valid := false.B
if (protocol.out(i).params.hasBCE) {
TLConnect(protocol.out(i).e, nif_slave_be.io.tilelink.e)
TLConnect(nif_slave_be.io.tilelink.b, protocol.out(i).b)
}
ingresses(i * 1 + 0 + edgesIn.size * 1).flit <> nif_slave_be.io.flits.b
nif_slave_be.io.flits.e <> egresses(i * 1 + 0 + edgesIn.size * 1).flit
}
}}
}
}
abstract class TLNoCLike(implicit p: Parameters) extends LazyModule {
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"TLNoC (data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
// TileLink NoC does not preserve FIFO-ness, masters to this NoC should instantiate FIFOFixers
port.managers map { manager => manager.v1copy(fifoId = None) }
}
)
}
)
}
abstract class TLNoCModuleImp(outer: LazyModule) extends LazyModuleImp(outer) {
val edgesIn: Seq[TLEdge]
val edgesOut: Seq[TLEdge]
val nodeMapping: DiplomaticNetworkNodeMapping
val nocName: String
lazy val inNames = nodeMapping.genUniqueName(edgesIn.map(_.master.masters.map(_.name)))
lazy val outNames = nodeMapping.genUniqueName(edgesOut.map(_.slave.slaves.map(_.name)))
lazy val edgeInNodes = nodeMapping.getNodesIn(inNames)
lazy val edgeOutNodes = nodeMapping.getNodesOut(outNames)
  def printNodeMappings(): Unit = {
println(s"Constellation: TLNoC $nocName inwards mapping:")
for ((n, i) <- inNames zip edgeInNodes) {
val node = i.map(_.toString).getOrElse("X")
println(s" $node <- $n")
}
println(s"Constellation: TLNoC $nocName outwards mapping:")
for ((n, i) <- outNames zip edgeOutNodes) {
val node = i.map(_.toString).getOrElse("X")
println(s" $node <- $n")
}
}
}
trait TLNoCParams
// Instantiates a private TLNoC. Replaces the TLXbar
// BEGIN: TLNoCParams
case class SimpleTLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping,
nocParams: NoCParams = NoCParams(),
) extends TLNoCParams
class TLNoC(params: SimpleTLNoCParams, name: String = "test", inlineNoC: Boolean = false)(implicit p: Parameters) extends TLNoCLike {
// END: TLNoCParams
override def shouldBeInlined = inlineNoC
lazy val module = new TLNoCModuleImp(this) {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
printNodeMappings()
val protocolParams = TileLinkABCDEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.nocParams.copy(hasCtrl = false, nocName=name, inlineNoC = inlineNoC),
Seq(protocolParams),
inlineNoC = inlineNoC
)))
noc.io.protocol(0) match {
case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) => l <> r }
(io_out zip protocol.out).foreach { case (l,r) => l <> r }
}
}
}
}
case class SplitACDxBETLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping,
acdNoCParams: NoCParams = NoCParams(),
beNoCParams: NoCParams = NoCParams(),
beDivision: Int = 2
) extends TLNoCParams
class TLSplitACDxBENoC(params: SplitACDxBETLNoCParams, name: String = "test", inlineNoC: Boolean = false)(implicit p: Parameters) extends TLNoCLike {
override def shouldBeInlined = inlineNoC
lazy val module = new TLNoCModuleImp(this) {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
printNodeMappings()
val acdProtocolParams = TileLinkACDProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val beProtocolParams = TileLinkBEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val acd_noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.acdNoCParams.copy(hasCtrl = false, nocName=s"${name}_acd", inlineNoC = inlineNoC),
Seq(acdProtocolParams),
inlineNoC = inlineNoC
)))
val be_noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.beNoCParams.copy(hasCtrl = false, nocName=s"${name}_be", inlineNoC = inlineNoC),
Seq(beProtocolParams),
widthDivision = params.beDivision,
inlineNoC = inlineNoC
)))
acd_noc.io.protocol(0) match { case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) =>
l := DontCare
l.a <> r.a
l.c <> r.c
l.d <> r.d
}
(io_out zip protocol.out).foreach { case (l,r) =>
r := DontCare
l.a <> r.a
l.c <> r.c
l.d <> r.d
}
}}
be_noc.io.protocol(0) match { case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) =>
l := DontCare
l.b <> r.b
l.e <> r.e
}
(io_out zip protocol.out).foreach { case (l,r) =>
r := DontCare
l.b <> r.b
l.e <> r.e
}
}}
}
}
case class GlobalTLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping
) extends TLNoCParams
// Maps this interconnect onto a global NoC
class TLGlobalNoC(params: GlobalTLNoCParams, name: String = "test")(implicit p: Parameters) extends TLNoCLike {
lazy val module = new TLNoCModuleImp(this) with CanAttachToGlobalNoC {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
val protocolParams = TileLinkABCDEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
printNodeMappings()
val io_global = IO(Flipped(protocolParams.genIO()))
io_global match {
case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) => l <> r }
(io_out zip protocol.out).foreach { case (l,r) => l <> r }
}
}
}
}
| module TLSlaveToNoC_4( // @[Tilelink.scala:125:7]
input clock, // @[Tilelink.scala:125:7]
input reset, // @[Tilelink.scala:125:7]
input io_tilelink_a_ready, // @[Tilelink.scala:132:14]
output io_tilelink_a_valid, // @[Tilelink.scala:132:14]
output [2:0] io_tilelink_a_bits_opcode, // @[Tilelink.scala:132:14]
output [2:0] io_tilelink_a_bits_param, // @[Tilelink.scala:132:14]
output [3:0] io_tilelink_a_bits_size, // @[Tilelink.scala:132:14]
output [5:0] io_tilelink_a_bits_source, // @[Tilelink.scala:132:14]
output [31:0] io_tilelink_a_bits_address, // @[Tilelink.scala:132:14]
output [7:0] io_tilelink_a_bits_mask, // @[Tilelink.scala:132:14]
output [63:0] io_tilelink_a_bits_data, // @[Tilelink.scala:132:14]
output io_tilelink_a_bits_corrupt, // @[Tilelink.scala:132:14]
output io_tilelink_b_ready, // @[Tilelink.scala:132:14]
input io_tilelink_b_valid, // @[Tilelink.scala:132:14]
input [1:0] io_tilelink_b_bits_param, // @[Tilelink.scala:132:14]
input [5:0] io_tilelink_b_bits_source, // @[Tilelink.scala:132:14]
input [31:0] io_tilelink_b_bits_address, // @[Tilelink.scala:132:14]
input io_tilelink_c_ready, // @[Tilelink.scala:132:14]
output io_tilelink_c_valid, // @[Tilelink.scala:132:14]
output [2:0] io_tilelink_c_bits_opcode, // @[Tilelink.scala:132:14]
output [2:0] io_tilelink_c_bits_param, // @[Tilelink.scala:132:14]
output [3:0] io_tilelink_c_bits_size, // @[Tilelink.scala:132:14]
output [5:0] io_tilelink_c_bits_source, // @[Tilelink.scala:132:14]
output [31:0] io_tilelink_c_bits_address, // @[Tilelink.scala:132:14]
output [63:0] io_tilelink_c_bits_data, // @[Tilelink.scala:132:14]
output io_tilelink_c_bits_corrupt, // @[Tilelink.scala:132:14]
output io_tilelink_d_ready, // @[Tilelink.scala:132:14]
input io_tilelink_d_valid, // @[Tilelink.scala:132:14]
input [2:0] io_tilelink_d_bits_opcode, // @[Tilelink.scala:132:14]
input [1:0] io_tilelink_d_bits_param, // @[Tilelink.scala:132:14]
input [3:0] io_tilelink_d_bits_size, // @[Tilelink.scala:132:14]
input [5:0] io_tilelink_d_bits_source, // @[Tilelink.scala:132:14]
input [4:0] io_tilelink_d_bits_sink, // @[Tilelink.scala:132:14]
input io_tilelink_d_bits_denied, // @[Tilelink.scala:132:14]
input [63:0] io_tilelink_d_bits_data, // @[Tilelink.scala:132:14]
input io_tilelink_d_bits_corrupt, // @[Tilelink.scala:132:14]
output io_tilelink_e_valid, // @[Tilelink.scala:132:14]
output [4:0] io_tilelink_e_bits_sink, // @[Tilelink.scala:132:14]
output io_flits_a_ready, // @[Tilelink.scala:132:14]
input io_flits_a_valid, // @[Tilelink.scala:132:14]
input io_flits_a_bits_head, // @[Tilelink.scala:132:14]
input io_flits_a_bits_tail, // @[Tilelink.scala:132:14]
input [72:0] io_flits_a_bits_payload, // @[Tilelink.scala:132:14]
input io_flits_b_ready, // @[Tilelink.scala:132:14]
output io_flits_b_valid, // @[Tilelink.scala:132:14]
output io_flits_b_bits_head, // @[Tilelink.scala:132:14]
output io_flits_b_bits_tail, // @[Tilelink.scala:132:14]
output [72:0] io_flits_b_bits_payload, // @[Tilelink.scala:132:14]
output [5:0] io_flits_b_bits_egress_id, // @[Tilelink.scala:132:14]
output io_flits_c_ready, // @[Tilelink.scala:132:14]
input io_flits_c_valid, // @[Tilelink.scala:132:14]
input io_flits_c_bits_head, // @[Tilelink.scala:132:14]
input io_flits_c_bits_tail, // @[Tilelink.scala:132:14]
input [72:0] io_flits_c_bits_payload, // @[Tilelink.scala:132:14]
input io_flits_d_ready, // @[Tilelink.scala:132:14]
output io_flits_d_valid, // @[Tilelink.scala:132:14]
output io_flits_d_bits_head, // @[Tilelink.scala:132:14]
output io_flits_d_bits_tail, // @[Tilelink.scala:132:14]
output [72:0] io_flits_d_bits_payload, // @[Tilelink.scala:132:14]
output [5:0] io_flits_d_bits_egress_id, // @[Tilelink.scala:132:14]
input io_flits_e_valid, // @[Tilelink.scala:132:14]
input io_flits_e_bits_head, // @[Tilelink.scala:132:14]
input io_flits_e_bits_tail, // @[Tilelink.scala:132:14]
input [72:0] io_flits_e_bits_payload // @[Tilelink.scala:132:14]
);
wire [64:0] _d_io_flit_bits_payload; // @[Tilelink.scala:146:17]
TLAFromNoC_3 a ( // @[Tilelink.scala:143:17]
.clock (clock),
.reset (reset),
.io_protocol_ready (io_tilelink_a_ready),
.io_protocol_valid (io_tilelink_a_valid),
.io_protocol_bits_opcode (io_tilelink_a_bits_opcode),
.io_protocol_bits_param (io_tilelink_a_bits_param),
.io_protocol_bits_size (io_tilelink_a_bits_size),
.io_protocol_bits_source (io_tilelink_a_bits_source),
.io_protocol_bits_address (io_tilelink_a_bits_address),
.io_protocol_bits_mask (io_tilelink_a_bits_mask),
.io_protocol_bits_data (io_tilelink_a_bits_data),
.io_protocol_bits_corrupt (io_tilelink_a_bits_corrupt),
.io_flit_ready (io_flits_a_ready),
.io_flit_valid (io_flits_a_valid),
.io_flit_bits_head (io_flits_a_bits_head),
.io_flit_bits_tail (io_flits_a_bits_tail),
.io_flit_bits_payload (io_flits_a_bits_payload)
); // @[Tilelink.scala:143:17]
TLBToNoC_4 b ( // @[Tilelink.scala:144:17]
.clock (clock),
.reset (reset),
.io_protocol_ready (io_tilelink_b_ready),
.io_protocol_valid (io_tilelink_b_valid),
.io_protocol_bits_param (io_tilelink_b_bits_param),
.io_protocol_bits_source (io_tilelink_b_bits_source),
.io_protocol_bits_address (io_tilelink_b_bits_address),
.io_flit_ready (io_flits_b_ready),
.io_flit_valid (io_flits_b_valid),
.io_flit_bits_head (io_flits_b_bits_head),
.io_flit_bits_tail (io_flits_b_bits_tail),
.io_flit_bits_payload (io_flits_b_bits_payload),
.io_flit_bits_egress_id (io_flits_b_bits_egress_id)
); // @[Tilelink.scala:144:17]
TLCFromNoC_4 c ( // @[Tilelink.scala:145:17]
.clock (clock),
.reset (reset),
.io_protocol_ready (io_tilelink_c_ready),
.io_protocol_valid (io_tilelink_c_valid),
.io_protocol_bits_opcode (io_tilelink_c_bits_opcode),
.io_protocol_bits_param (io_tilelink_c_bits_param),
.io_protocol_bits_size (io_tilelink_c_bits_size),
.io_protocol_bits_source (io_tilelink_c_bits_source),
.io_protocol_bits_address (io_tilelink_c_bits_address),
.io_protocol_bits_data (io_tilelink_c_bits_data),
.io_protocol_bits_corrupt (io_tilelink_c_bits_corrupt),
.io_flit_ready (io_flits_c_ready),
.io_flit_valid (io_flits_c_valid),
.io_flit_bits_head (io_flits_c_bits_head),
.io_flit_bits_tail (io_flits_c_bits_tail),
.io_flit_bits_payload (io_flits_c_bits_payload[64:0]) // @[Tilelink.scala:156:14]
); // @[Tilelink.scala:145:17]
TLDToNoC_4 d ( // @[Tilelink.scala:146:17]
.clock (clock),
.reset (reset),
.io_protocol_ready (io_tilelink_d_ready),
.io_protocol_valid (io_tilelink_d_valid),
.io_protocol_bits_opcode (io_tilelink_d_bits_opcode),
.io_protocol_bits_param (io_tilelink_d_bits_param),
.io_protocol_bits_size (io_tilelink_d_bits_size),
.io_protocol_bits_source (io_tilelink_d_bits_source),
.io_protocol_bits_sink (io_tilelink_d_bits_sink),
.io_protocol_bits_denied (io_tilelink_d_bits_denied),
.io_protocol_bits_data (io_tilelink_d_bits_data),
.io_protocol_bits_corrupt (io_tilelink_d_bits_corrupt),
.io_flit_ready (io_flits_d_ready),
.io_flit_valid (io_flits_d_valid),
.io_flit_bits_head (io_flits_d_bits_head),
.io_flit_bits_tail (io_flits_d_bits_tail),
.io_flit_bits_payload (_d_io_flit_bits_payload),
.io_flit_bits_egress_id (io_flits_d_bits_egress_id)
); // @[Tilelink.scala:146:17]
TLEFromNoC_4 e ( // @[Tilelink.scala:147:17]
.clock (clock),
.reset (reset),
.io_protocol_valid (io_tilelink_e_valid),
.io_protocol_bits_sink (io_tilelink_e_bits_sink),
.io_flit_valid (io_flits_e_valid),
.io_flit_bits_head (io_flits_e_bits_head),
.io_flit_bits_tail (io_flits_e_bits_tail),
.io_flit_bits_payload (io_flits_e_bits_payload[4:0]) // @[Tilelink.scala:158:14]
); // @[Tilelink.scala:147:17]
assign io_flits_d_bits_payload = {8'h0, _d_io_flit_bits_payload}; // @[Tilelink.scala:125:7, :146:17, :157:14]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UARTRx.scala:
package sifive.blocks.devices.uart
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
/** UARTRx module receives serial input from the Rx port and transmits it to the Rx fifo in parallel
*
 * ==Datapath==
* Port(Rx) -> sample -> shifter -> Rx fifo -> TL bus
*
* ==Structure==
* - baud rate divisor counter:
* generate pulse, the enable signal for sample and data shift
* - sample counter:
* sample happens in middle
* - data counter
* control signals for data shift process
* - sample and data shift logic
*
* ==State Machine==
* s_idle: detect start bit, init data_count and sample count, start pulse counter
 * s_data: data receiving
*
* @note Rx fifo transmits Rx data to TL bus
*/
class UARTRx(c: UARTParams) extends Module {
val io = IO(new Bundle {
/** enable signal from top */
val en = Input(Bool())
/** input data from rx port */
val in = Input(UInt(1.W))
/** output data to Rx fifo */
val out = Valid(UInt(c.dataBits.W))
/** divisor bits */
val div = Input(UInt(c.divisorBits.W))
/** parity enable */
val enparity = c.includeParity.option(Input(Bool()))
/** parity select
*
* 0 -> even parity
* 1 -> odd parity
*/
val parity = c.includeParity.option(Input(Bool()))
/** parity error bit */
val errorparity = c.includeParity.option(Output(Bool()))
/** databit select
*
 * true -> 8
* false -> 9
*/
val data8or9 = (c.dataBits == 9).option(Input(Bool()))
})
if (c.includeParity)
io.errorparity.get := false.B
val debounce = RegInit(0.U(2.W))
val debounce_max = (debounce === 3.U)
val debounce_min = (debounce === 0.U)
val prescaler = Reg(UInt((c.divisorBits - c.oversample + 1).W))
val start = WireDefault(false.B)
/** enable signal for sampling and data shifting */
val pulse = (prescaler === 0.U)
private val dataCountBits = log2Floor(c.dataBits+c.includeParity.toInt) + 1
/** init = data bits(8 or 9) + parity bit(0 or 1) + start bit(1) */
val data_count = Reg(UInt(dataCountBits.W))
val data_last = (data_count === 0.U)
val parity_bit = (data_count === 1.U) && io.enparity.getOrElse(false.B)
val sample_count = Reg(UInt(c.oversample.W))
val sample_mid = (sample_count === ((c.oversampleFactor - c.nSamples + 1) >> 1).U)
// todo unused
val sample_last = (sample_count === 0.U)
/** counter for data and sample
*
* {{{
* | data_count | sample_count |
* }}}
*/
val countdown = Cat(data_count, sample_count) - 1.U
// Compensate for the divisor not being a multiple of the oversampling period.
// Let remainder k = (io.div % c.oversampleFactor).
// For the last k samples, extend the sampling delay by 1 cycle.
val remainder = io.div(c.oversample-1, 0)
val extend = (sample_count < remainder) // Pad head: (sample_count > ~remainder)
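  // Worked example (illustrative numbers, not in the original source): with
  // oversampleFactor = 16 and io.div = 54, div >> 4 = 3 and remainder = 6, so ten sample
  // periods last 3 cycles and the final six last 4 cycles: 10*3 + 6*4 = 54, i.e. roughly
  // io.div core-clock cycles per received bit.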
/** prescaler reset signal
*
* conditions:
* {{{
 * start : transmission starts
 * pulse : returns true every pulse counter period
* }}}
*/
val restore = start || pulse
val prescaler_in = Mux(restore, io.div >> c.oversample, prescaler)
val prescaler_next = prescaler_in - Mux(restore && extend, 0.U, 1.U)
/** buffer for sample results */
val sample = Reg(UInt(c.nSamples.W))
// take the majority bit of sample buffer
val voter = Majority(sample.asBools.toSet)
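  // e.g. (illustrative) with nSamples = 3: sample = "b110".U votes 1, sample = "b100".U votes 0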
// data buffer
val shifter = Reg(UInt(c.dataBits.W))
val valid = RegInit(false.B)
valid := false.B
io.out.valid := valid
io.out.bits := (if (c.dataBits == 8) shifter else Mux(io.data8or9.get, Cat(0.U, shifter(8,1)), shifter))
val (s_idle :: s_data :: Nil) = Enum(2)
val state = RegInit(s_idle)
switch (state) {
is (s_idle) {
// todo !(!io.in)?
when (!(!io.in) && !debounce_min) {
debounce := debounce - 1.U
}
when (!io.in) {
debounce := debounce + 1.U
when (debounce_max) {
state := s_data
start := true.B
prescaler := prescaler_next
// init data_count
data_count := (c.dataBits+1).U + (if (c.includeParity) io.enparity.get else 0.U) - io.data8or9.getOrElse(false.B).asUInt
// init sample_count = 15
sample_count := (c.oversampleFactor - 1).U
}
}
}
is (s_data) {
prescaler := prescaler_next
when (pulse) {
// sample scan in
sample := Cat(sample, io.in)
data_count := countdown >> c.oversample
sample_count := countdown(c.oversample-1, 0)
when (sample_mid) {
if (c.includeParity) {
// act according to frame bit stage at its respective sampling point
// check parity bit for error
when (parity_bit) {
io.errorparity.get := (shifter.asBools.reduce(_ ^ _) ^ voter ^ io.parity.get)
}
when (data_last) {
state := s_idle
valid := true.B
} .elsewhen (!parity_bit) {
// do not add parity bit to final rx data
shifter := Cat(voter, shifter >> 1)
}
} else {
when (data_last) {
state := s_idle
valid := true.B
} .otherwise {
shifter := Cat(voter, shifter >> 1)
}
}
}
}
}
}
when (!io.en) {
debounce := 0.U
}
}
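// Sizing sketch (added for clarity, not in the original source): the receiver above spends
// roughly io.div core-clock cycles per bit (16 sample periods of io.div >> 4 cycles plus the
// remainder), so for a hypothetical 50 MHz clock at 115200 baud one would program
// io.div = 50000000 / 115200 ≈ 434.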
/*
Copyright 2016 SiFive, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
| module UARTRx( // @[UARTRx.scala:28:7]
input clock, // @[UARTRx.scala:28:7]
input reset, // @[UARTRx.scala:28:7]
input io_en, // @[UARTRx.scala:29:14]
input io_in, // @[UARTRx.scala:29:14]
output io_out_valid, // @[UARTRx.scala:29:14]
output [7:0] io_out_bits, // @[UARTRx.scala:29:14]
input [15:0] io_div // @[UARTRx.scala:29:14]
);
wire io_en_0 = io_en; // @[UARTRx.scala:28:7]
wire io_in_0 = io_in; // @[UARTRx.scala:28:7]
wire [15:0] io_div_0 = io_div; // @[UARTRx.scala:28:7]
wire parity_bit = 1'h0; // @[UARTRx.scala:72:41]
wire [4:0] _data_count_T = 5'h9; // @[UARTRx.scala:129:42]
wire [4:0] _data_count_T_2 = 5'h9; // @[UARTRx.scala:129:92]
wire [3:0] _data_count_T_1 = 4'h9; // @[UARTRx.scala:129:{42,92}]
wire [3:0] _data_count_T_3 = 4'h9; // @[UARTRx.scala:129:{42,92}]
wire io_out_valid_0; // @[UARTRx.scala:28:7]
wire [7:0] io_out_bits_0; // @[UARTRx.scala:28:7]
reg [1:0] debounce; // @[UARTRx.scala:59:25]
wire debounce_max = &debounce; // @[UARTRx.scala:59:25, :60:32]
wire debounce_min = debounce == 2'h0; // @[UARTRx.scala:59:25, :61:32]
reg [12:0] prescaler; // @[UARTRx.scala:63:22]
wire start; // @[UARTRx.scala:64:26]
wire pulse = prescaler == 13'h0; // @[UARTRx.scala:63:22, :66:26]
reg [3:0] data_count; // @[UARTRx.scala:70:23]
wire data_last = data_count == 4'h0; // @[UARTRx.scala:70:23, :71:31]
wire _parity_bit_T = data_count == 4'h1; // @[UARTRx.scala:70:23, :72:32]
reg [3:0] sample_count; // @[UARTRx.scala:73:25]
wire sample_mid = sample_count == 4'h7; // @[UARTRx.scala:73:25, :74:34]
wire sample_last = sample_count == 4'h0; // @[UARTRx.scala:71:31, :73:25, :76:35]
wire [7:0] _countdown_T = {data_count, sample_count}; // @[UARTRx.scala:70:23, :73:25, :83:22]
wire [8:0] _countdown_T_1 = {1'h0, _countdown_T} - 9'h1; // @[UARTRx.scala:83:{22,49}]
wire [7:0] countdown = _countdown_T_1[7:0]; // @[UARTRx.scala:83:49]
wire [3:0] remainder = io_div_0[3:0]; // @[UARTRx.scala:28:7, :88:25]
wire extend = sample_count < remainder; // @[UARTRx.scala:73:25, :88:25, :89:30]
wire restore = start | pulse; // @[UARTRx.scala:64:26, :66:26, :98:23]
wire [11:0] _prescaler_in_T = io_div_0[15:4]; // @[UARTRx.scala:28:7, :99:42]
wire [12:0] prescaler_in = restore ? {1'h0, _prescaler_in_T} : prescaler; // @[UARTRx.scala:63:22, :98:23, :99:{25,42}]
wire _prescaler_next_T = restore & extend; // @[UARTRx.scala:89:30, :98:23, :100:51]
wire _prescaler_next_T_1 = ~_prescaler_next_T; // @[UARTRx.scala:100:{42,51}]
wire [13:0] _prescaler_next_T_2 = {1'h0, prescaler_in} - {13'h0, _prescaler_next_T_1}; // @[UARTRx.scala:66:26, :99:25, :100:{37,42}]
wire [12:0] prescaler_next = _prescaler_next_T_2[12:0]; // @[UARTRx.scala:100:37]
reg [2:0] sample; // @[UARTRx.scala:102:19]
wire _voter_T = sample[0]; // @[UARTRx.scala:102:19, :104:31]
wire _voter_T_1 = sample[1]; // @[UARTRx.scala:102:19, :104:31]
wire _voter_T_2 = sample[2]; // @[UARTRx.scala:102:19, :104:31]
wire _voter_T_3 = _voter_T & _voter_T_1; // @[Misc.scala:167:48]
wire _voter_T_4 = _voter_T & _voter_T_2; // @[Misc.scala:167:48]
wire _voter_T_5 = _voter_T_3 | _voter_T_4; // @[Misc.scala:167:48, :168:22]
wire _voter_T_6 = _voter_T_1 & _voter_T_2; // @[Misc.scala:167:48]
wire voter = _voter_T_5 | _voter_T_6; // @[Misc.scala:167:48, :168:22]
reg [7:0] shifter; // @[UARTRx.scala:106:20]
assign io_out_bits_0 = shifter; // @[UARTRx.scala:28:7, :106:20]
reg valid; // @[UARTRx.scala:108:22]
assign io_out_valid_0 = valid; // @[UARTRx.scala:28:7, :108:22]
reg state; // @[UARTRx.scala:114:22]
wire [2:0] _GEN = {1'h0, debounce}; // @[UARTRx.scala:59:25, :120:30]
wire [2:0] _debounce_T = _GEN - 3'h1; // @[UARTRx.scala:120:30]
wire [1:0] _debounce_T_1 = _debounce_T[1:0]; // @[UARTRx.scala:120:30]
wire [2:0] _debounce_T_2 = _GEN + 3'h1; // @[UARTRx.scala:120:30, :123:30]
wire [1:0] _debounce_T_3 = _debounce_T_2[1:0]; // @[UARTRx.scala:123:30]
assign start = ~state & ~io_in_0 & debounce_max; // @[UARTRx.scala:28:7, :60:32, :64:26, :114:22, :116:18, :119:15, :122:21, :124:29]
wire [3:0] _sample_T = {sample, io_in_0}; // @[UARTRx.scala:28:7, :102:19, :140:22]
wire [3:0] _data_count_T_4 = countdown[7:4]; // @[UARTRx.scala:83:49, :141:33]
wire [3:0] _sample_count_T = countdown[3:0]; // @[UARTRx.scala:83:49, :142:34]
wire [6:0] _shifter_T = shifter[7:1]; // @[UARTRx.scala:106:20, :163:45]
wire [7:0] _shifter_T_1 = {voter, _shifter_T}; // @[Misc.scala:168:22]
wire _GEN_0 = ~io_in_0 & debounce_max; // @[UARTRx.scala:28:7, :60:32, :114:22, :119:15, :122:21, :124:29, :125:17]
wire _GEN_1 = _GEN_0 | state; // @[UARTRx.scala:114:22, :122:21, :124:29, :125:17]
wire _GEN_2 = state & pulse; // @[UARTRx.scala:66:26, :102:19, :114:22, :116:18, :138:20, :140:16]
wire _GEN_3 = state & pulse & sample_mid; // @[UARTRx.scala:66:26, :74:34, :109:9, :114:22, :116:18, :138:20, :144:27, :159:30]
always @(posedge clock) begin // @[UARTRx.scala:28:7]
if (reset) begin // @[UARTRx.scala:28:7]
debounce <= 2'h0; // @[UARTRx.scala:59:25]
valid <= 1'h0; // @[UARTRx.scala:108:22]
state <= 1'h0; // @[UARTRx.scala:114:22]
end
else begin // @[UARTRx.scala:28:7]
if (io_en_0) begin // @[UARTRx.scala:28:7]
if (state) begin // @[UARTRx.scala:114:22]
end
else if (io_in_0) begin // @[UARTRx.scala:28:7]
if (io_in_0 & ~debounce_min) // @[UARTRx.scala:28:7, :61:32, :119:{23,26}]
debounce <= _debounce_T_1; // @[UARTRx.scala:59:25, :120:30]
end
else // @[UARTRx.scala:28:7]
debounce <= _debounce_T_3; // @[UARTRx.scala:59:25, :123:30]
end
else // @[UARTRx.scala:28:7]
debounce <= 2'h0; // @[UARTRx.scala:59:25]
valid <= _GEN_3 & data_last; // @[UARTRx.scala:71:31, :108:22, :109:9, :116:18, :138:20, :144:27, :159:30]
if (state) // @[UARTRx.scala:114:22]
state <= ~(state & pulse & sample_mid & data_last) & state; // @[UARTRx.scala:66:26, :71:31, :74:34, :114:22, :116:18, :138:20, :144:27, :159:30, :160:21]
else // @[UARTRx.scala:114:22]
state <= _GEN_1; // @[UARTRx.scala:114:22, :122:21, :124:29, :125:17]
end
if (_GEN_1) // @[UARTRx.scala:114:22, :116:18, :122:21, :124:29, :125:17]
prescaler <= prescaler_next; // @[UARTRx.scala:63:22, :100:37]
if (state) begin // @[UARTRx.scala:114:22]
if (_GEN_2) begin // @[UARTRx.scala:102:19, :116:18, :138:20, :140:16]
data_count <= _data_count_T_4; // @[UARTRx.scala:70:23, :141:33]
sample_count <= _sample_count_T; // @[UARTRx.scala:73:25, :142:34]
end
end
else if (_GEN_0) begin // @[UARTRx.scala:114:22, :122:21, :124:29, :125:17]
data_count <= 4'h9; // @[UARTRx.scala:70:23, :129:{42,92}]
sample_count <= 4'hF; // @[UARTRx.scala:73:25]
end
if (_GEN_2) // @[UARTRx.scala:102:19, :116:18, :138:20, :140:16]
sample <= _sample_T[2:0]; // @[UARTRx.scala:102:19, :140:{16,22}]
if (~state | ~_GEN_3 | data_last) begin // @[UARTRx.scala:71:31, :106:20, :109:9, :114:22, :116:18, :138:20, :144:27, :159:30]
end
else // @[UARTRx.scala:106:20, :116:18, :138:20, :144:27, :159:30]
shifter <= _shifter_T_1; // @[UARTRx.scala:106:20, :163:29]
  end // always @(posedge)
assign io_out_valid = io_out_valid_0; // @[UARTRx.scala:28:7]
assign io_out_bits = io_out_bits_0; // @[UARTRx.scala:28:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
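// Usage sketch (illustrative, not in the original file): build a 2-deep, reset-initialized
// chain whose registers get the suggested names pin_sync_1 and pin_sync_0.
//   val stablePin = ShiftRegInit(rawPin, n = 2, init = false.B, name = Some("pin_sync"))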
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
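// Usage sketch (illustrative, not in the original file): delay an 8-bit value through three
// asynchronously-reset stages, each initialized to zero.
//   val delayed = AsyncResetShiftReg(someUInt8, depth = 3, init = 0, name = Some("pipe"))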
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
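// Usage sketch (illustrative, not in the original file): capture a bus coming from another
// clock domain on an enable pulse, with the capture register reset to zero.
//   val captured = ClockCrossingReg(busFromOtherDomain, en = capturePulse, doInit = true,
//                                   name = Some("cdc_capture"))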
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_33( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
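      // Illustrative mapping (not in the original source): with numTlTxns = 8, read
      // transactions use TL source IDs 0-7 (MSB = 0) and writes use IDs 8-15 (MSB = 1), so the
      // MSB of a D-channel source identifies which direction the response belongs to.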
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
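      // Worked bit-level example (illustrative): if usedWriteIds = b0101 then
      // ~usedWriteIds = b1010, leftOR(...) = b1110, shifting and inverting gives b0011, and
      // ANDing with ~usedWriteIds leaves b0010 -- a one-hot mask of the lowest-index free ID.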
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
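      // In other words (clarifying comment, not in the original source): the ack names a write
      // ID that is not yet marked used because the final W beat has not fired, so the B
      // response is held back until usedWriteIdsSet records that ID.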
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
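// A hedged sketch (not part of the original file) of wiring the node returned by the
// apply method above into a diplomacy graph. The endpoint nodes are placeholders (???)
// standing in for a real AXI4 master and TileLink manager, and the relevant axi4 and
// tilelink imports are assumed to be in scope.
class ExampleAxi4ToTlBridge(implicit p: Parameters) extends LazyModule {
  val axiMaster: AXI4MasterNode = ??? // provided elsewhere in a real design
  val tlManager: TLManagerNode = ??? // provided elsewhere in a real design
  // Allow up to eight outstanding TileLink transactions inside the converter.
  tlManager := UnsafeAXI4ToTL(numTlTxns = 8) := axiMaster
  lazy val module = new LazyModuleImp(this) {}
}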
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
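// A hedged sketch (not part of the original file) of driving the reserve/response
// protocol described above: reserve an entry in list 0, then return a single-beat
// response for that entry on the following cycle. It assumes
// ReservableListBufferParameters takes (numEntries, numLists, numBeats); backpressure
// on the response path is ignored for brevity, and all names are illustrative.
class ReservableListBufferExample extends Module {
  val bufParams = ReservableListBufferParameters(numEntries = 4, numLists = 2, numBeats = 2)
  val buf = Module(new ReservableListBuffer(UInt(8.W), bufParams))
  // Keep trying to reserve an entry in list 0.
  buf.ioReserve.valid := true.B
  buf.ioReserve.bits := 0.U
  // One cycle after a successful reservation, send a single-beat response for it.
  buf.ioResponse.valid := RegNext(buf.ioReserve.fire, false.B)
  buf.ioResponse.bits.index := RegNext(buf.ioReservedIndex)
  buf.ioResponse.bits.count := 0.U
  buf.ioResponse.bits.numBeats1 := 0.U
  buf.ioResponse.bits.data := 0xAB.U
  // Always drain the in-order output stream.
  buf.ioDataOut.ready := true.B
}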
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved; we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
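// A hedged sketch (not part of the original file) of how a caller might choose between
// the two implementations above, mirroring the numEntries == 1 requirement enforced by
// PassthroughListBuffer; the helper name is illustrative only.
object ExampleListBufferFactory {
  def apply[T <: Data](gen: T, params: ReservableListBufferParameters): BaseReservableListBuffer[T] =
    if (params.numEntries == 1) Module(new PassthroughListBuffer(gen, params))
    else Module(new ReservableListBuffer(gen, params))
}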
| module dataMems_27( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
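// A minimal usage sketch (not part of the original file): delay a Bool by two cycles,
// resetting both pipeline stages to false.B and suggesting the name prefix "pipe".
// The module and port names below are illustrative only.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in = Input(Bool())
    val out = Output(Bool())
  })
  io.out := ShiftRegInit(io.in, n = 2, init = false.B, name = Some("pipe"))
}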
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
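// A minimal usage sketch (not part of the original file): a 3-deep, asynchronously
// reset pipeline on a 2-bit signal, initialised to zero. Names are illustrative only.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(2.W))
    val q = Output(UInt(2.W))
  })
  io.q := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("timing_pipe"))
}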
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
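// A minimal usage sketch (not part of the original file): bring an asynchronous level
// signal into the local clock domain through a 3-deep, asynchronously reset
// synchronizer chain with a reset value of 0. Port names are illustrative only.
class AsyncLevelSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val asyncIn = Input(Bool()) // driven from another clock domain
    val syncOut = Output(Bool()) // safe to sample in this clock domain
  })
  io.syncOut := AsyncResetSynchronizerShiftReg(io.asyncIn, 3, 0)
}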
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
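// A minimal usage sketch (not part of the original file): capture a multi-bit value
// from another clock domain into a single enabled register with a zero initial value.
// In a real crossing the enable would come from a synchronized handshake; here it is
// simply an input port, and all names are illustrative.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val en = Input(Bool())
    val q = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = true, name = Some("cdc_capture"))
}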
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_426( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
end // always @(posedge, posedge)
assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File UserYanker.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.amba.axi4
import chisel3._
import chisel3.util.{Queue, QueueIO, UIntToOH}
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.util.BundleMap
/** This adapter prunes all user bit fields of the echo type from request messages,
* storing them in queues and echoing them back when matching response messages are received.
*
* It also optionally rate limits the number of transactions that can be in flight simultaneously
* per FIFO domain / A[W|R]ID.
*
* @param capMaxFlight is an optional maximum number of transactions that can be in flight per A[W|R]ID.
*/
class AXI4UserYanker(capMaxFlight: Option[Int] = None)(implicit p: Parameters) extends LazyModule
{
val node = AXI4AdapterNode(
masterFn = { mp => mp.copy(
masters = mp.masters.map { m => m.copy(
maxFlight = (m.maxFlight, capMaxFlight) match {
case (Some(x), Some(y)) => Some(x min y)
case (Some(x), None) => Some(x)
case (None, Some(y)) => Some(y)
case (None, None) => None })},
echoFields = Nil)},
slaveFn = { sp => sp })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// Which fields are we stripping?
val echoFields = edgeIn.master.echoFields
val need_bypass = edgeOut.slave.minLatency < 1
edgeOut.master.masters.foreach { m =>
require (m.maxFlight.isDefined, "UserYanker needs a flight cap on each ID")
}
def queue(id: Int) = {
val depth = edgeOut.master.masters.find(_.id.contains(id)).flatMap(_.maxFlight).getOrElse(0)
if (depth == 0) {
Wire(new QueueIO(BundleMap(echoFields), 1)) // unused ID => undefined value
} else {
Module(new Queue(BundleMap(echoFields), depth, flow=need_bypass)).io
}
}
val rqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) }
val wqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) }
val arid = in.ar.bits.id
val ar_ready = VecInit(rqueues.map(_.enq.ready))(arid)
in .ar.ready := out.ar.ready && ar_ready
out.ar.valid := in .ar.valid && ar_ready
Connectable.waiveUnmatched(out.ar.bits, in.ar.bits) match {
case (lhs, rhs) => lhs :<= rhs
}
val rid = out.r.bits.id
val r_valid = VecInit(rqueues.map(_.deq.valid))(rid)
val r_bits = VecInit(rqueues.map(_.deq.bits))(rid)
assert (!out.r.valid || r_valid) // Q must be ready faster than the response
Connectable.waiveUnmatched(in.r, out.r) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.r.bits.echo :<= r_bits
val arsel = UIntToOH(arid, edgeIn.master.endId).asBools
val rsel = UIntToOH(rid, edgeIn.master.endId).asBools
(rqueues zip (arsel zip rsel)) foreach { case (q, (ar, r)) =>
q.deq.ready := out.r .valid && in .r .ready && r && out.r.bits.last
q.deq.valid := DontCare
q.deq.bits := DontCare
q.enq.valid := in .ar.valid && out.ar.ready && ar
q.enq.ready := DontCare
q.enq.bits :<>= in.ar.bits.echo
q.count := DontCare
}
val awid = in.aw.bits.id
val aw_ready = VecInit(wqueues.map(_.enq.ready))(awid)
in .aw.ready := out.aw.ready && aw_ready
out.aw.valid := in .aw.valid && aw_ready
Connectable.waiveUnmatched(out.aw.bits, in.aw.bits) match {
case (lhs, rhs) => lhs :<>= rhs
}
val bid = out.b.bits.id
val b_valid = VecInit(wqueues.map(_.deq.valid))(bid)
val b_bits = VecInit(wqueues.map(_.deq.bits))(bid)
assert (!out.b.valid || b_valid) // Q must be ready faster than the response
Connectable.waiveUnmatched(in.b, out.b) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.b.bits.echo :<>= b_bits
val awsel = UIntToOH(awid, edgeIn.master.endId).asBools
val bsel = UIntToOH(bid, edgeIn.master.endId).asBools
(wqueues zip (awsel zip bsel)) foreach { case (q, (aw, b)) =>
q.deq.ready := out.b .valid && in .b .ready && b
q.deq.valid := DontCare
q.deq.bits := DontCare
q.enq.valid := in .aw.valid && out.aw.ready && aw
q.enq.ready := DontCare
q.enq.bits :<>= in.aw.bits.echo
q.count := DontCare
}
out.w :<>= in.w
}
}
}
object AXI4UserYanker
{
def apply(capMaxFlight: Option[Int] = None)(implicit p: Parameters): AXI4Node =
{
val axi4yank = LazyModule(new AXI4UserYanker(capMaxFlight))
axi4yank.node
}
}
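// A hedged usage sketch (not part of the original file): place the yanker between an
// AXI4 master and slave so echo fields are stripped on the way in and restored on the
// way back, capping each ID at four in-flight transactions. The endpoint nodes are
// placeholders (???) provided elsewhere in a real design.
class ExampleYankedPath(implicit p: Parameters) extends LazyModule {
  val master: AXI4MasterNode = ???
  val slave: AXI4SlaveNode = ???
  slave := AXI4UserYanker(capMaxFlight = Some(4)) := master
  lazy val module = new LazyModuleImp(this) {}
}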
| module AXI4UserYanker( // @[UserYanker.scala:36:9]
input clock, // @[UserYanker.scala:36:9]
input reset, // @[UserYanker.scala:36:9]
output auto_in_aw_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_aw_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_id, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_aw_bits_len, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_aw_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_in_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
input auto_in_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_in_aw_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
input auto_in_aw_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25]
output auto_in_w_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_w_valid, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_w_bits_data, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_w_bits_strb, // @[LazyModuleImp.scala:107:25]
input auto_in_w_bits_last, // @[LazyModuleImp.scala:107:25]
input auto_in_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_b_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_b_bits_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_b_bits_resp, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_b_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_in_b_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
output auto_in_b_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25]
output auto_in_ar_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_ar_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_id, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_ar_bits_len, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_ar_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_in_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
input auto_in_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_in_ar_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
input auto_in_ar_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25]
input auto_in_r_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_r_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_r_bits_id, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_r_bits_data, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_r_bits_resp, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_r_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_in_r_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
output auto_in_r_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25]
output auto_in_r_bits_last, // @[LazyModuleImp.scala:107:25]
input auto_out_aw_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_aw_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_aw_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_aw_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
input auto_out_w_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_w_valid, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_w_bits_data, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_w_bits_strb, // @[LazyModuleImp.scala:107:25]
output auto_out_w_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_b_bits_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_out_ar_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_ar_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_ar_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_ar_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
output auto_out_r_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_r_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_r_bits_id, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_r_bits_data, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_r_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_out_r_bits_last // @[LazyModuleImp.scala:107:25]
);
wire _Queue1_BundleMap_23_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_23_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_23_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_23_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_23_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_22_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_22_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_22_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_22_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_22_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_21_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_21_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_21_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_21_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_21_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_20_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_20_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_20_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_20_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_20_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_19_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_19_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_19_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_19_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_19_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_18_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_18_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_18_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_18_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_18_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_17_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_17_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_17_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_17_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_17_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_16_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_16_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_16_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_16_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_16_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_15_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_15_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_15_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_15_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_15_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_14_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_14_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_14_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_14_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_14_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_13_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_13_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_13_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_13_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_13_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_12_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_12_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_12_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_12_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_12_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_7_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_7_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue2_BundleMap_7_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue2_BundleMap_7_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_7_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_6_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_6_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue2_BundleMap_6_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue2_BundleMap_6_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_6_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_5_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_5_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue2_BundleMap_5_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue2_BundleMap_5_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_5_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_4_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_4_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue2_BundleMap_4_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue2_BundleMap_4_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_4_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_11_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_11_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_11_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_11_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_11_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_10_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_10_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_10_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_10_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_10_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_9_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_9_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_9_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_9_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_9_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_8_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_8_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_8_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_8_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_8_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_7_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_7_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_7_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_7_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_7_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_6_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_6_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_6_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_6_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_6_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_5_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_5_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_5_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_5_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_5_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_4_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_4_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_4_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_4_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_4_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_3_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_3_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_3_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_3_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_3_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_2_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_2_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_2_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_2_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_2_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_1_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_1_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_1_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_1_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_1_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue1_BundleMap_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue1_BundleMap_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue1_BundleMap_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_3_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_3_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue2_BundleMap_3_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue2_BundleMap_3_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_3_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_2_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_2_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue2_BundleMap_2_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue2_BundleMap_2_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_2_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_1_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_1_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue2_BundleMap_1_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue2_BundleMap_1_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_1_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_io_enq_ready; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_io_deq_valid; // @[UserYanker.scala:51:17]
wire [3:0] _Queue2_BundleMap_io_deq_bits_tl_state_size; // @[UserYanker.scala:51:17]
wire [4:0] _Queue2_BundleMap_io_deq_bits_tl_state_source; // @[UserYanker.scala:51:17]
wire _Queue2_BundleMap_io_deq_bits_extra_id; // @[UserYanker.scala:51:17]
wire [15:0] _GEN = {{_Queue1_BundleMap_11_io_enq_ready}, {_Queue1_BundleMap_10_io_enq_ready}, {_Queue1_BundleMap_9_io_enq_ready}, {_Queue1_BundleMap_8_io_enq_ready}, {_Queue1_BundleMap_7_io_enq_ready}, {_Queue1_BundleMap_6_io_enq_ready}, {_Queue1_BundleMap_5_io_enq_ready}, {_Queue1_BundleMap_4_io_enq_ready}, {_Queue1_BundleMap_3_io_enq_ready}, {_Queue1_BundleMap_2_io_enq_ready}, {_Queue1_BundleMap_1_io_enq_ready}, {_Queue1_BundleMap_io_enq_ready}, {_Queue2_BundleMap_3_io_enq_ready}, {_Queue2_BundleMap_2_io_enq_ready}, {_Queue2_BundleMap_1_io_enq_ready}, {_Queue2_BundleMap_io_enq_ready}}; // @[UserYanker.scala:51:17, :60:36]
wire [15:0][3:0] _GEN_0 = {{_Queue1_BundleMap_11_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_10_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_9_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_8_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_7_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_6_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_5_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_4_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_3_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_2_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_1_io_deq_bits_tl_state_size}, {_Queue1_BundleMap_io_deq_bits_tl_state_size}, {_Queue2_BundleMap_3_io_deq_bits_tl_state_size}, {_Queue2_BundleMap_2_io_deq_bits_tl_state_size}, {_Queue2_BundleMap_1_io_deq_bits_tl_state_size}, {_Queue2_BundleMap_io_deq_bits_tl_state_size}}; // @[UserYanker.scala:51:17, :73:22]
wire [15:0][4:0] _GEN_1 = {{_Queue1_BundleMap_11_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_10_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_9_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_8_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_7_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_6_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_5_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_4_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_3_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_2_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_1_io_deq_bits_tl_state_source}, {_Queue1_BundleMap_io_deq_bits_tl_state_source}, {_Queue2_BundleMap_3_io_deq_bits_tl_state_source}, {_Queue2_BundleMap_2_io_deq_bits_tl_state_source}, {_Queue2_BundleMap_1_io_deq_bits_tl_state_source}, {_Queue2_BundleMap_io_deq_bits_tl_state_source}}; // @[UserYanker.scala:51:17, :73:22]
wire [15:0] _GEN_2 = {{_Queue1_BundleMap_11_io_deq_bits_extra_id}, {_Queue1_BundleMap_10_io_deq_bits_extra_id}, {_Queue1_BundleMap_9_io_deq_bits_extra_id}, {_Queue1_BundleMap_8_io_deq_bits_extra_id}, {_Queue1_BundleMap_7_io_deq_bits_extra_id}, {_Queue1_BundleMap_6_io_deq_bits_extra_id}, {_Queue1_BundleMap_5_io_deq_bits_extra_id}, {_Queue1_BundleMap_4_io_deq_bits_extra_id}, {_Queue1_BundleMap_3_io_deq_bits_extra_id}, {_Queue1_BundleMap_2_io_deq_bits_extra_id}, {_Queue1_BundleMap_1_io_deq_bits_extra_id}, {_Queue1_BundleMap_io_deq_bits_extra_id}, {_Queue2_BundleMap_3_io_deq_bits_extra_id}, {_Queue2_BundleMap_2_io_deq_bits_extra_id}, {_Queue2_BundleMap_1_io_deq_bits_extra_id}, {_Queue2_BundleMap_io_deq_bits_extra_id}}; // @[UserYanker.scala:51:17, :73:22]
wire _GEN_3 = auto_out_r_valid & auto_in_r_ready; // @[UserYanker.scala:78:37]
wire _GEN_4 = auto_in_ar_valid & auto_out_ar_ready; // @[UserYanker.scala:81:37]
wire [15:0] _GEN_5 = {{_Queue1_BundleMap_23_io_enq_ready}, {_Queue1_BundleMap_22_io_enq_ready}, {_Queue1_BundleMap_21_io_enq_ready}, {_Queue1_BundleMap_20_io_enq_ready}, {_Queue1_BundleMap_19_io_enq_ready}, {_Queue1_BundleMap_18_io_enq_ready}, {_Queue1_BundleMap_17_io_enq_ready}, {_Queue1_BundleMap_16_io_enq_ready}, {_Queue1_BundleMap_15_io_enq_ready}, {_Queue1_BundleMap_14_io_enq_ready}, {_Queue1_BundleMap_13_io_enq_ready}, {_Queue1_BundleMap_12_io_enq_ready}, {_Queue2_BundleMap_7_io_enq_ready}, {_Queue2_BundleMap_6_io_enq_ready}, {_Queue2_BundleMap_5_io_enq_ready}, {_Queue2_BundleMap_4_io_enq_ready}}; // @[UserYanker.scala:51:17, :89:36] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
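  // Added note (not in the original source): visible() is true only when, for every
  // client, either the source ID does not belong to that client or the address falls
  // inside one of that client's declared visibility regions.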
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
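  // Added note (not in the original source): the function above tracks a single symbolic
  // source ID. The two stability/range Properties let a formal tool pick any legal value
  // and hold it constant, so proving the request/response pairing assumptions for
  // sym_source proves them for every source ID.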
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
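    // Added comment (not in the original source): size_to_numfullbits(3.U) === "b111".U,
    // i.e. three full bits. Each source's opcode/size is recorded below as (value << 1) | 1,
    // so an all-zero slot means "nothing in flight" and the lookups drop the LSB flag with >> 1.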
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
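// Usage sketch (illustrative, not part of the original file); `enq`, `deq` and `busy`
// are hypothetical signals:
//   val h = DecoupledHelper(enq.valid, deq.ready, !busy)
//   deq.valid := h.fire(deq.ready)  // AND of every term except deq.ready
//   enq.ready := h.fire(enq.valid)  // AND of every term except enq.valid
// Excluding a signal from the expression that drives it avoids combinational
// ready/valid loops.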
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
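// Example (added for clarity, not in the original file), with hypothetical signals:
//   MuxTLookup(sel, (0.U, false.B), Seq(1.U -> ((a, x)), 2.U -> ((b, y))))
// yields (a, x) when sel === 1.U, (b, y) when sel === 2.U, and the default pair otherwise.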
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
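  // Added example (not in the original source): Str("ab") === 0x6162.U(16.W);
  // characters are packed big-endian, one byte per character.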
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
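// Added note (not in the original source): for a non-power-of-2 `mod`, partition()
// compares a widened LFSR sample against `mod` evenly spaced thresholds and
// PriorityEncoder returns the first bucket the sample falls below, giving an
// approximately uniform index in [0, mod).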
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
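// Worked example (added, not in the original source): Majority("b110".U) is true,
// since n = (3 >> 1) + 1 = 2 and at least one 2-element subset of the bits ANDs to true.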
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
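// Added note (not in the original source): PopCountAtLeast(x, 2) is true when at least
// two bits of x are set; the two() helper computes this recursively without a full
// PopCount, which is only needed for the n == 3 case.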
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
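// Editorial sketch, not part of the original file: a toy module driving MaskGen, assuming
// beatBytes = 4 as in the examples above. The module name MaskGenExample and its port names
// are hypothetical.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(2.W))   // low address bits within the 4-byte beat
    val lgSize = Input(UInt(2.W))   // log2 of the access size in bytes
    val mask   = Output(UInt(4.W))  // one bit per byte lane
  })
  // e.g. addr=0x3, lgSize=0 -> 0001; addr=0x3, lgSize=2 -> 1111
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 4)
}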
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
 * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
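// Editorial sketch, not part of the original file: a toy module wiring up PlusArg and
// PlusArg.timeout. The plusarg names verbose_after and max_cycles and the register cycle
// are hypothetical.
class PlusArgExample extends Module {
  val cycle = RegInit(0.U(32.W))
  cycle := cycle + 1.U
  // +verbose_after=N yields N.U at runtime; the default 0.U is used when the arg is absent
  val verboseAfter = PlusArg("verbose_after", default = 0, docstring = "cycle to start printing")
  when (verboseAfter =/= 0.U && cycle >= verboseAfter) { printf(p"cycle=$cycle\n") }
  // +max_cycles=N kills the simulation once the counter reaches N; the default 0 never asserts
  PlusArg.timeout("max_cycles", default = 0, docstring = "kill the sim after this many cycles")(cycle)
}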
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
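  // Editorial sketch, not part of the original file: unlike Seq.groupBy, the result preserves
  // the order in which keys are first encountered, which keeps generated hardware deterministic.
  private def groupByIntoSeqExample(): Unit = {
    val grouped = groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2)
    require(grouped == Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4)))
  }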
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
 * (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path from the point of serialization and may hold a read-only copy.
 * (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
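// Editorial sketch, not part of the original file: the param of an Acquire must be a grow
// request and the param of a Grant a cap, which the helpers above can check. The names
// legalAcquireParam and legalGrantParam are hypothetical.
object TLPermissionsExample {
  def legalAcquireParam(aParam: UInt): Bool = TLPermissions.isGrow(aParam)
  def legalGrantParam(dParam: UInt): Bool = TLPermissions.isCap(dParam)
}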
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
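// Editorial sketch, not part of the original file: under the ordering above, CACHED is the
// most relaxed region type and GET_EFFECTS the least.
object RegionTypeExample {
  // CACHED is 'more relaxed than' GET_EFFECTS under the ordering defined above
  require(RegionType.CACHED > RegionType.GET_EFFECTS)
}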
// A half-open range [start, end); it may be empty
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, "Id ranges cannot have negative size.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
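// Editorial sketch, not part of the original file: IdRange is half-open, so the end id is
// excluded and adjacent ranges do not overlap.
object IdRangeExample {
  val lo = IdRange(0, 4) // ids 0, 1, 2, 3
  val hi = IdRange(4, 8) // ids 4, 5, 6, 7
  require(!(lo overlaps hi) && lo.contains(3) && !lo.contains(4))
}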
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
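// Editorial sketch, not part of the original file: TransferSizes arithmetic on plain Scala
// values, mirroring the definitions above.
object TransferSizesExample {
  val a = TransferSizes(4, 64)   // supports 4, 8, ..., 64-byte transfers
  val b = TransferSizes(16, 256)
  require(a.contains(32) && !a.contains(128))
  require((a intersect b) == TransferSizes(16, 64))
  require((a mincover b) == TransferSizes(4, 256))
}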
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
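// Editorial sketch, not part of the original file: how base/mask describe address ranges,
// matching the examples in the comment above AddressSet.
object AddressSetExample {
  val dev = AddressSet(0x200, 0xff)      // covers 0x200-0x2ff
  val holes = AddressSet(0x1000, 0xf0f)  // covers 0x1000-0x100f, 0x1100-0x110f, ...
  require(dev.contains(BigInt(0x2ab)) && !dev.contains(BigInt(0x300)))
  require(holes.alignment == 0x10)       // each fragment is 16-byte aligned
}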
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
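  // Grant carries no payload while GrantData does; the fromSink id supplied here is
  // what the client echoes back in its GrantAck on channel E.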
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
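  // Responses sent on channel D to A-channel requests received by this manager.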
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
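// Illustrative sketch (not from the original source): how a simple manager-side module
// might use the constructors above once it has its edge. The names `node` and `regfile`
// and the single-beat combinational handshake are assumptions for the example; real
// designs usually go through the register-router helpers instead.
//
//   lazy val module = new LazyModuleImp(this) {
//     val (in, edge) = node.in.head
//     val regfile = Reg(Vec(8, UInt(64.W)))
//     val isGet = in.a.bits.opcode === TLMessages.Get
//     in.a.ready := in.d.ready                 // single-beat, combinational response
//     in.d.valid := in.a.valid
//     in.d.bits := Mux(isGet,
//       edge.AccessAck(in.a.bits, regfile(in.a.bits.address(5, 3))), // AccessAckData
//       edge.AccessAck(in.a.bits))                                   // plain AccessAck for Put*
//     when (in.a.fire && !isGet) { regfile(in.a.bits.address(5, 3)) := in.a.bits.data }
//   }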
module TLMonitor_56( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
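  // Protocol monitor generated for an uncached TileLink edge (A and D channels only):
  // it tracks outstanding A-channel requests per source id and checks that each
  // D-channel response is legal. The constant-zero c_* and same_cycle_resp wires below
  // are the C-channel bookkeeping, reduced to constants because this edge carries no
  // B/C/E traffic.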
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [10:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [10:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_first_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_first_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_first_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_first_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_set_wo_ready_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_set_wo_ready_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_opcodes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_opcodes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_sizes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_sizes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_opcodes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_opcodes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_sizes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_sizes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_probe_ack_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_probe_ack_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_probe_ack_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_probe_ack_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_4_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_5_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [16385:0] _c_sizes_set_T_1 = 16386'h0; // @[Monitor.scala:768:52]
wire [13:0] _c_opcodes_set_T = 14'h0; // @[Monitor.scala:767:79]
wire [13:0] _c_sizes_set_T = 14'h0; // @[Monitor.scala:768:77]
wire [16386:0] _c_opcodes_set_T_1 = 16387'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [2047:0] _c_set_wo_ready_T = 2048'h1; // @[OneHot.scala:58:35]
wire [2047:0] _c_set_T = 2048'h1; // @[OneHot.scala:58:35]
wire [4159:0] c_opcodes_set = 4160'h0; // @[Monitor.scala:740:34]
wire [4159:0] c_sizes_set = 4160'h0; // @[Monitor.scala:741:34]
wire [1039:0] c_set = 1040'h0; // @[Monitor.scala:738:34]
wire [1039:0] c_set_wo_ready = 1040'h0; // @[Monitor.scala:739:34]
wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76]
wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [10:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 11'h410; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {18'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [10:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [10:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 11'h410; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_665; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg [10:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg [10:0] source_1; // @[Monitor.scala:541:22]
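  // Per-source bookkeeping: one in-flight bit per source id plus packed 4-bit opcode
  // and size entries, set when an A-channel request fires and cleared when the
  // matching D-channel response completes.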
reg [1039:0] inflight; // @[Monitor.scala:614:27]
reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire [1039:0] a_set; // @[Monitor.scala:626:34]
wire [1039:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [4159:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [4159:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [13:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [13:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [13:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [13:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [13:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [13:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [13:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [13:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [13:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [4159:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [4159:0] _a_opcode_lookup_T_6 = {4156'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [4159:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [4159:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [4159:0] _a_size_lookup_T_6 = {4156'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [4159:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[4159:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [2047:0] _GEN_2 = 2048'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [2047:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [2047:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [13:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [13:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [13:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [16386:0] _a_opcodes_set_T_1 = {16383'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [16385:0] _a_sizes_set_T_1 = {16383'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [1039:0] d_clr; // @[Monitor.scala:664:34]
wire [1039:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [4159:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [4159:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [2047:0] _GEN_5 = 2048'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [16398:0] _d_opcodes_clr_T_5 = 16399'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [16398:0] _d_sizes_clr_T_5 = 16399'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1039:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [1039:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1039:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [4159:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [4159:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [4159:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [4159:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [4159:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [4159:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
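  // Cycle counter for the response-timeout assertion; the limit is supplied at run
  // time through the plusarg readers (see the _plusarg_reader_out wires above).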
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1039:0] inflight_1; // @[Monitor.scala:726:35]
wire [1039:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [4159:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [4159:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [4159:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [4159:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [4159:0] _c_opcode_lookup_T_6 = {4156'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [4159:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [4159:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [4159:0] _c_size_lookup_T_6 = {4156'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [4159:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[4159:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [1039:0] d_clr_1; // @[Monitor.scala:774:34]
wire [1039:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [4159:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [4159:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_691 ? _d_clr_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [16398:0] _d_opcodes_clr_T_11 = 16399'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [16398:0] _d_sizes_clr_T_11 = 16399'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 11'h0; // @[Monitor.scala:36:7, :795:113]
wire [1039:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1039:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [4159:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [4159:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [4159:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [4159:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
  reg [31:0] watchdog_1; // @[Monitor.scala:818:27]